/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw)	((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)	((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))


/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

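/* True if this pte is the first entry in its page-table page, i.e. the
 * pointer is VT-d page aligned; used to detect crossing into the next
 * table page when walking ptes linearly. */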
static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 * 	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of registered intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

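/*
 * Allocate from a kmem cache with PF_MEMALLOC temporarily set, so the
 * allocation may dip into the emergency reserves instead of failing when
 * memory is tight; the caller's original PF_MEMALLOC state is restored.
 */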
static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

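/*
 * Find the largest adjusted guest address width (agaw) supported by this
 * iommu that does not exceed max_gaw, scanning the SAGAW capability bits
 * from largest to smallest; returns -1 if none is supported.
 */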
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

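/* Check whether a present context entry already exists for bus/devfn */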
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

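/*
 * Walk the page table from the top level down to the leaf entry for the
 * given dma pfn, allocating (and cache-flushing) intermediate table pages
 * as needed; cmpxchg64 ensures a table installed by a concurrent walker
 * is reused rather than clobbered.
 */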
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

/* return the pte for a given dma pfn at a specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp + level_size(level) - 1 <= last_pfn) {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

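/*
 * Program the root table address register and issue the Set Root Table
 * Pointer command, spinning until hardware reports completion.
 */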
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

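/*
 * Flush the IOMMU's internal write buffer so queued writes reach memory.
 * Only needed on hardware that reports the RWBF capability (or is known
 * to need it via rwbf_quirk).
 */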
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

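/*
 * Send a device-IOTLB (ATS) invalidation for the given address/mask to
 * every ATS-enabled device in the domain, via the queued invalidation
 * interface.
 */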
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

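/*
 * Clear the Enable Protected Memory bit and wait for the protected
 * region status to clear, so DMA to those regions is no longer blocked
 * once translation is enabled.
 */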
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

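/*
 * Allocate the per-iommu domain id bitmap and domain pointer array.
 * In caching mode, domain id 0 tags non-present translations, so it is
 * pre-allocated here.
 */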
static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
		for (; i < cap_ndoms(iommu->cap); ) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);

			i = find_next_bit(iommu->domain_ids,
					  cap_ndoms(iommu->cap), i+1);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

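/*
 * Bind a domain to an iommu: claim a free domain id on that iommu,
 * record the domain in the iommu's table and mark the iommu in the
 * domain's bitmap.
 */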
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

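/* Undo iommu_attach_domain(): release the domain id this domain holds
 * on the given iommu and clear the iommu from the domain's bitmap. */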
static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

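/*
 * Round a guest address width up to the nearest width the page-table
 * layout can represent: 12 bits of page offset plus a whole number of
 * 9-bit levels, capped at 64 bits.
 */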
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

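/*
 * Initialise a newly allocated domain: set up its iova allocator and
 * reserved ranges, pick an agaw the hardware supports for the requested
 * guest width, record iommu capabilities and allocate the top-level
 * page directory.
 */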
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for an iommu which has
		 * less agaw than the default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware, and ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

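/*
 * Context mappings are set up for the device itself and for every
 * bridge on the path to the root: DMA requests from a device behind a
 * PCIe-to-PCI bridge may be tagged with the bridge's source-id, so
 * those bridges need present context entries too (compare the
 * source-id discussion in iommu_should_identity_map() below).
 */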
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
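/*
 * Worked example (illustrative), assuming 4KiB MM and VTD pages:
 * aligned_nrpages(0x1800, 0x1000) masks the host address down to the
 * in-page offset 0x800, then returns PAGE_ALIGN(0x800 + 0x1000) >> 12,
 * i.e. 2 pages -- partial pages at either end count as whole pages.
 */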

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

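	/*
	 * For the physically contiguous (non-sg) case, sg_res is primed
	 * to nr_pages + 1 below so that it can never reach zero inside
	 * the loop and sg is therefore never dereferenced.
	 */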
	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages--) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte++;
		if (!nr_pages || first_pte_in_page(pte)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
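/*
 * A minimal usage sketch (illustrative only; the values are made up):
 * map one read/write VTD page at IOVA pfn 0x1000, backed by physical
 * pfn 0x2000, in an already-initialized domain:
 *
 *	err = domain_pfn_mapping(domain, 0x1000, 0x2000, 1,
 *				 DMA_PTE_READ | DMA_PTE_WRITE);
 */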

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
		       pci_name(pdev));
		free_domain_mem(domain);
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else was faster and already set it */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * The RMRR range might overlap the physical memory range;
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820 and so didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

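/*
 * Helper for si_domain_init() below: work_with_active_regions() reports
 * memory regions in MM page frame numbers, so they are converted to byte
 * addresses here; iommu_domain_identity_map() converts back to VTD pages
 * internally.
 */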
static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;

}

static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

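/*
 * Bind @pdev to @domain: program its context entries and track it on
 * the domain's device list.  Illustrative call (mirrors the real call
 * sites below; the device pointer is assumed valid):
 *
 *	ret = domain_add_dev_info(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 */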
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

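/*
 * Decide whether a device should live in the static identity (1:1)
 * domain.  A value of 2 for iommu_identity_mapping -- set under
 * CONFIG_DMAR_BROKEN_GFX_WA in init_dmars() below -- restricts identity
 * mapping to graphics devices.
 */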
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if (iommu_identity_mapping == 2)
		return IS_GFX_DEVICE(pdev);

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pdev->is_pcie) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup)
		return pdev->dma_mask > DMA_BIT_MASK(32);

	return 1;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		if (iommu_should_identity_map(pdev, 1)) {
			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
			       hw ? "hardware" : "software", pci_name(pdev));

			ret = domain_add_dev_info(si_domain, pdev,
						  hw ? CONTEXT_TT_PASS_THROUGH :
						       CONTEXT_TT_MULTI_LEVEL);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *	allocate root
	 *	initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read-only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
				 sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping = 1;
#ifdef CONFIG_DMAR_BROKEN_GFX_WA
	else
		iommu_identity_mapping = 2;
#endif
	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx, and isa; this may fall back
	 * to the static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *	for each dev attached to rmrr
	 *	do
	 *		locate drhd for dev, alloc domain for dev
	 *		allocate free domain
	 *		allocate page table entries for rmrr
	 *		if context not allocated for bus
	 *			allocate and init context
	 *			set present in root table for this bus
	 *		init context with domain, translation etc
	 *	endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *	enable fault log
	 *	global invalidate context cache
	 *	global invalidate iotlb
	 *	enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

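	/*
	 * Unless forcedac was requested on the command line (an
	 * assumption about what dmar_forcedac reflects), prefer an IOVA
	 * below 4GiB so 64-bit-capable devices avoid dual-address-cycle
	 * addressing unless the low space is exhausted.
	 */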
	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
				    DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev)
{
	struct pci_dev *pdev;
	int found;

	if (unlikely(dev->bus != &pci_bus_type))
		return 1;

	pdev = to_pci_dev(dev);
	if (iommu_dummy(pdev))
		return 1;

	if (!iommu_identity_mapping)
		return 0;

	found = identity_mapping(pdev);
	if (found) {
		if (iommu_should_identity_map(pdev, 0))
			return 1;
		else {
			/*
			 * A 32-bit DMA device is removed from si_domain
			 * and falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * When a 64-bit DMA device is detached from a VM, it is
		 * put back into si_domain for identity mapping.
		 */
		if (iommu_should_identity_map(pdev, 0)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev,
						  hw_pass_through ?
						  CONTEXT_TT_PASS_THROUGH :
						  CONTEXT_TT_MULTI_LEVEL);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return 0;
}

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;
	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(hwdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * [paddr, paddr + size) might span partial pages, so we should map
	 * the whole page. Note: if two parts of one page are separately
	 * mapped, we might have two guest_addr mappings to the same host
	 * paddr, but this is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 mm_to_dma_pfn(paddr_pfn), size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

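	/*
	 * The handle returned to the caller is the base of the allocated
	 * IOVA range plus the sub-page offset of paddr; zero doubles as
	 * the failure value.
	 */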
	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					      iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(dev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
		      (unsigned long long)dev_addr))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * queue up the release of the unmap to save roughly 1/6th
		 * of the CPU time consumed by the iotlb flush operation...
		 */
	}
}


static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the page table entries */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap; this saves roughly
		 * 1/6th of the CPU time that a synchronous IOTLB flush
		 * would consume.
		 */
	}
}

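/*
 * Identity path used when translation is disabled for this device:
 * hand back physical addresses without touching the IOMMU.
 */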
static int intel_nontranslate_map_sg(struct device *hwdev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

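/*
 * Map a scatterlist through the IOMMU: one IOVA allocation sized to
 * cover every segment, then a single pass building page-table entries
 * for the whole list.
 */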
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page table entries */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/*
	 * It's a non-present to present mapping. Only flush if caching
	 * mode; cover the 'size' pages just mapped.
	 */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

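/* A DMA address of zero is how this driver reports a failed mapping. */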
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

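/*
 * The dma_map_ops instance installed as the global dma_ops.  Drivers
 * never call these handlers directly; the generic DMA API dispatches to
 * them.  An illustrative sketch only, not tied to any real driver:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *			   DMA_TO_DEVICE);	(reaches intel_map_page)
 *	dma_unmap_page(&pdev->dev, dma, PAGE_SIZE,
 *		       DMA_TO_DEVICE);		(reaches intel_unmap_page)
 */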
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

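/*
 * Slab caches for the objects this driver allocates most often:
 * domains, device-domain bindings and IOVA nodes.
 */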
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

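/*
 * Mark DRHD units as ignored when they cover no PCI devices at all, or
 * (unless dmar_map_gfx is set) only graphics devices; devices behind an
 * ignored unit bypass the IOMMU entirely.
 */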
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

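/*
 * Suspend/resume support: translation is disabled and the fault-event
 * registers saved on suspend; resume re-enables the hardware via
 * init_iommu_hw() and restores the saved registers.
 */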
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {
		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */

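/*
 * Main DMA-remapping entry point: parse the DMAR table, initialise
 * every IOMMU, then install intel_dma_ops for the DMA API and
 * intel_iommu_ops for the generic IOMMU API.
 */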
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now; the
	 * initialization above is also used by interrupt remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

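/*
 * A device behind a PCIe-to-PCI bridge shares context entries with the
 * bridges on its upstream path; detach those alongside the device
 * itself.
 */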
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * Note whether another device owned by this domain sits
		 * under the same IOMMU; if none does, the IOMMU is
		 * cleared from iommu_bmp below and the count and
		 * capabilities are updated.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* Domain ID for a virtual machine; it is never programmed into a context entry. */
static unsigned long vm_domid;

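/*
 * A domain spanning several IOMMUs is limited by the least-capable
 * unit; report the smallest AGAW among the IOMMUs in this domain.
 */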
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

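/*
 * Initialise a freshly allocated VM domain: IOVA allocator, reserved
 * ranges, address-width bookkeeping, and the top-level page directory.
 */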
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

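/*
 * Callbacks backing the generic iommu_ops interface, used by consumers
 * such as KVM device assignment rather than by the DMA API paths above.
 */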
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

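/*
 * Map a range of guest-physical memory for an iommu-api domain.  A
 * consumer reaches this through the generic wrappers, roughly (an
 * illustrative sketch only, not taken from any particular caller):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 */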
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (!size)
		return;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init = intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev = intel_iommu_attach_device,
	.detach_dev = intel_iommu_detach_device,
	.map = intel_iommu_map_range,
	.unmap = intel_iommu_unmap_range,
	.iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};

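/* Quirk for chipsets that misreport their write-buffer-flush capability. */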
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);