/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is a power-of-two multiple of a 4KiB page
 * and that the mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all power-of-two page sizes of 4KiB or larger.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
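
/*
 * Illustrative sketch, not part of the driver logic: with
 * INTEL_IOMMU_PGSIZES == ~0xFFFUL every bit from 12 upwards is set, and a
 * set bit k advertises support for a page size of 1UL << k:
 *
 *	bit 12 -> 4KiB, bit 13 -> 8KiB, ... bit 21 -> 2MiB, bit 30 -> 1GiB
 *
 * e.g. the hypothetical test "INTEL_IOMMU_PGSIZES & (1UL << 21)" is non-zero,
 * so 2MiB is advertised whether or not the hardware really supports it,
 * which is exactly the behavior the comment above describes.
 */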

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}
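
/*
 * A small worked example of the helpers above (illustrative only), assuming
 * the default 48-bit guest address width (DEFAULT_DOMAIN_ADDRESS_WIDTH):
 *
 *	width_to_agaw(48) == 2, agaw_to_level(2) == 4  -> 4-level page table
 *	level_to_offset_bits(1) == 0, (2) == 9, (3) == 18, (4) == 27
 *
 * For a DMA pfn of 0x12345, pfn_level_offset() picks the table index at
 * each level by shifting and masking with LEVEL_MASK (0x1ff):
 *
 *	pfn_level_offset(0x12345, 1) == 0x145
 *	pfn_level_offset(0x12345, 2) == 0x091
 *	pfn_level_offset(0x12345, 3) == 0x000
 *	pfn_level_offset(0x12345, 4) == 0x000
 *
 * and lvl_to_nr_pages(2) == 512, i.e. one level-2 (2MiB) entry covers 512
 * 4KiB VT-d pages.
 */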

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
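
/*
 * Illustrative note: on x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the two
 * conversions above are identity operations. On a hypothetical configuration
 * with 16KiB MM pages (PAGE_SHIFT == 14) one MM pfn corresponds to four
 * 4KiB VT-d pfns:
 *
 *	mm_to_dma_pfn(5) == 20 and dma_to_mm_pfn(23) == 5
 *
 * which is why VT-d pages must never be larger than MM pages, as the comment
 * above notes.
 */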

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
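
/*
 * Illustrative sketch (not compiled; "context", "domain_id", "pgd" and
 * "agaw" are placeholders rather than variables from this file) of how the
 * helpers above are typically combined when a context entry is programmed:
 *
 *	context_clear_entry(context);
 *	context_set_domain_id(context, domain_id);
 *	context_set_address_width(context, agaw);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_translation_type(context, 0);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * Translation type 0 means "translate through the page table at the address
 * root"; only once the present bit is set (last) will the IOMMU walk the
 * page table rooted at pgd for this bus/devfn.
 */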

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
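
/*
 * Illustrative example (placeholder values, not code from this file): a leaf
 * PTE is built by combining the helpers above, e.g.
 *
 *	dma_clear_pte(pte);
 *	dma_set_pte_pfn(pte, 0x12345);
 *	dma_set_pte_readable(pte);
 *	dma_set_pte_writable(pte);
 *
 * leaves pte->val == 0x12345003: bits 12-63 hold the host page frame and
 * bits 0-1 the R/W permissions, so dma_pte_present() is true and
 * dma_pte_addr() returns 0x12345000. first_pte_in_page() simply checks
 * whether the pte pointer is 4KiB aligned, i.e. whether it is entry 0 of
 * its 512-entry table.
 */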

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

/* define the limit of IOMMUs supported in each domain */
#ifdef	CONFIG_X86
# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	IOMMU_UNITS_SUPPORTED	64
#endif

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
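
/*
 * A worked example with assumed capability values (illustrative only): with
 * the default 48-bit width, width_to_agaw(48) == 2, so the loop in
 * __iommu_calculate_agaw() starts at agaw 2 (a 4-level table). If the
 * hardware's SAGAW field only advertises bit 1 (3-level, 39-bit), the loop
 * falls back to agaw 1, giving agaw_to_width(1) == 39 and
 * agaw_to_level(1) == 3. If no bit at or below the requested agaw is set,
 * -1 is returned, signalling that no compatible guest address width exists.
 */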

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);

	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}
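
/*
 * A worked example with assumed capability values (illustrative only): if
 * one iommu reports cap_super_page_val() == 0x3 (2MiB and 1GiB superpages)
 * and another reports 0x1 (2MiB only), the loop above reduces mask to
 * 0x3 & 0x1 == 0x1 and fls(0x1) == 1, so domain->iommu_superpage becomes 1,
 * i.e. 2MiB superpages only (see the field's comment in struct dmar_domain).
 * If any active iommu reports 0, mask hits 0 and superpages are disabled.
 */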

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
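
/*
 * An illustrative walk (assumed 48-bit domain, so 4 levels):
 * pfn_to_dma_pte(domain, 0x12345, 1) starts at domain->pgd (level 4) and at
 * each level indexes parent[pfn_level_offset(pfn, level)], allocating a
 * missing intermediate table with alloc_pgtable_page() and publishing it
 * with cmpxchg64() so concurrent callers agree on a single table. With
 * target_level == 1 it returns the 4KiB leaf PTE; with target_level == 2 a
 * caller could install a 2MiB superpage entry instead; with target_level == 0
 * it stops early at whatever superpage or non-present entry it meets first.
 */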


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       struct dma_pte *pte, unsigned long pfn,
			       unsigned long start_pfn, unsigned long last_pfn)
{
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

	do {
		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
			goto next;

		level_pfn = pfn & level_mask(level - 1);
		level_pte = phys_to_virt(dma_pte_addr(pte));

		if (level > 2)
			dma_pte_free_level(domain, level - 1, level_pte,
					   level_pfn, start_pfn, last_pfn);

		/* If range covers entire pagetable, free it */
		if (!(start_pfn > level_pfn ||
		      last_pfn < level_pfn + level_size(level))) {
			dma_clear_pte(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);
		}
next:
		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
			   domain->pgd, 0, start_pfn, last_pfn);

	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be extra safe. It looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size is
	 * too big.
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
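
/*
 * A worked example (illustrative only): for pages == 9,
 * __roundup_pow_of_two(9) is 16 and ilog2(16) is 4, so mask == 4 and a PSI
 * flush covers 2^4 == 16 4KiB pages starting at pfn, with the base address
 * naturally aligned to that size as the comment above requires. If mask
 * exceeds cap_max_amask_val(), or page-selective invalidation is not
 * supported at all, the code falls back to flushing the whole domain (DSI).
 */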

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}
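
/*
 * Note (illustrative, with placeholder names SOME_BIT and WANTED): both
 * routines above follow the same register handshake. The global command
 * register is written with the new TE (translation enable) state and
 * IOMMU_WAIT_OP() then spins, under register_lock with interrupts off,
 * until the global status register reflects the change (DMA_GSTS_TES set
 * or cleared):
 *
 *	writel(iommu->gcmd | SOME_BIT, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & WANTED), sts);
 *
 * The same pattern appears in iommu_set_root_entry() and
 * iommu_flush_write_buffer() earlier in this file.
 */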


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001435 return -ENODEV;
1436 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001437 }
1438 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001439 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440}
1441
1442static void domain_reserve_special_ranges(struct dmar_domain *domain)
1443{
1444 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1445}
1446
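/*
 * Worked example for the rounding below (illustrative): the guest
 * address width is rounded up so that (gaw - 12) is a whole number of
 * 9-bit page-table levels above the 4KiB page offset.
 *   gaw = 39: r = (39 - 12) % 9 = 0  ->  agaw = 39
 *   gaw = 40: r = (40 - 12) % 9 = 1  ->  agaw = 40 + 9 - 1 = 48
 *   gaw = 62: r = 5                  ->  66, capped to 64
 */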
1447static inline int guestwidth_to_adjustwidth(int gaw)
1448{
1449 int agaw;
1450 int r = (gaw - 12) % 9;
1451
1452 if (r == 0)
1453 agaw = gaw;
1454 else
1455 agaw = gaw + 9 - r;
1456 if (agaw > 64)
1457 agaw = 64;
1458 return agaw;
1459}
1460
1461static int domain_init(struct dmar_domain *domain, int guest_width)
1462{
1463 struct intel_iommu *iommu;
1464 int adjust_width, agaw;
1465 unsigned long sagaw;
1466
David Millerf6611972008-02-06 01:36:23 -08001467 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001468 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001469
1470 domain_reserve_special_ranges(domain);
1471
1472 /* calculate AGAW */
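	/*
	 * e.g. the default 48-bit guest width maps to a 4-level table;
	 * if the IOMMU's SAGAW field does not advertise that level, the
	 * next larger supported one is chosen below.
	 */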
Weidong Han8c11e792008-12-08 15:29:22 +08001473 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001474 if (guest_width > cap_mgaw(iommu->cap))
1475 guest_width = cap_mgaw(iommu->cap);
1476 domain->gaw = guest_width;
1477 adjust_width = guestwidth_to_adjustwidth(guest_width);
1478 agaw = width_to_agaw(adjust_width);
1479 sagaw = cap_sagaw(iommu->cap);
1480 if (!test_bit(agaw, &sagaw)) {
1481 /* hardware doesn't support it, choose a bigger one */
1482 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1483 agaw = find_next_bit(&sagaw, 5, agaw);
1484 if (agaw >= 5)
1485 return -ENODEV;
1486 }
1487 domain->agaw = agaw;
1488 INIT_LIST_HEAD(&domain->devices);
1489
Weidong Han8e604092008-12-08 15:49:06 +08001490 if (ecap_coherent(iommu->ecap))
1491 domain->iommu_coherency = 1;
1492 else
1493 domain->iommu_coherency = 0;
1494
Sheng Yang58c610b2009-03-18 15:33:05 +08001495 if (ecap_sc_support(iommu->ecap))
1496 domain->iommu_snooping = 1;
1497 else
1498 domain->iommu_snooping = 0;
1499
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001500 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001501 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001502 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001503
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001504 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001505 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001506 if (!domain->pgd)
1507 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001508 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509 return 0;
1510}
1511
1512static void domain_exit(struct dmar_domain *domain)
1513{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001514 struct dmar_drhd_unit *drhd;
1515 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001516
1517	 /* Domain 0 is reserved, so don't process it */
1518 if (!domain)
1519 return;
1520
Alex Williamson7b668352011-05-24 12:02:41 +01001521 /* Flush any lazy unmaps that may reference this domain */
1522 if (!intel_iommu_strict)
1523 flush_unmaps_timeout(0);
1524
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001525 domain_remove_dev_info(domain);
1526 /* destroy iovas */
1527 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001528
1529 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001530 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001531
1532 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001533 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001534
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001535 for_each_active_iommu(iommu, drhd)
Mike Travis1b198bb2012-03-05 15:05:16 -08001536 if (test_bit(iommu->seq_id, domain->iommu_bmp))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001537 iommu_detach_domain(domain, iommu);
1538
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001539 free_domain_mem(domain);
1540}
1541
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001542static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1543 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001544{
1545 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001546 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001547 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001548 struct dma_pte *pgd;
1549 unsigned long num;
1550 unsigned long ndomains;
1551 int id;
1552 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001553 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001554
1555 pr_debug("Set context mapping for %02x:%02x.%d\n",
1556 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001557
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001559 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1560 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001561
David Woodhouse276dbf92009-04-04 01:45:37 +01001562 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001563 if (!iommu)
1564 return -ENODEV;
1565
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001566 context = device_to_context_entry(iommu, bus, devfn);
1567 if (!context)
1568 return -ENOMEM;
1569 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001570 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001571 spin_unlock_irqrestore(&iommu->lock, flags);
1572 return 0;
1573 }
1574
Weidong Hanea6606b2008-12-08 23:08:15 +08001575 id = domain->id;
1576 pgd = domain->pgd;
1577
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001578 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1579 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001580 int found = 0;
1581
1582 /* find an available domain id for this device in iommu */
1583 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001584 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001585 if (iommu->domains[num] == domain) {
1586 id = num;
1587 found = 1;
1588 break;
1589 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001590 }
1591
1592 if (found == 0) {
1593 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1594 if (num >= ndomains) {
1595 spin_unlock_irqrestore(&iommu->lock, flags);
1596 printk(KERN_ERR "IOMMU: no free domain ids\n");
1597 return -EFAULT;
1598 }
1599
1600 set_bit(num, iommu->domain_ids);
1601 iommu->domains[num] = domain;
1602 id = num;
1603 }
1604
1605 /* Skip top levels of page tables for
1606		 * an iommu which has a smaller agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001607 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001608 */
Chris Wright1672af12009-12-02 12:06:34 -08001609 if (translation != CONTEXT_TT_PASS_THROUGH) {
1610 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1611 pgd = phys_to_virt(dma_pte_addr(pgd));
1612 if (!dma_pte_present(pgd)) {
1613 spin_unlock_irqrestore(&iommu->lock, flags);
1614 return -ENOMEM;
1615 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001616 }
1617 }
1618 }
1619
1620 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001621
Yu Zhao93a23a72009-05-18 13:51:37 +08001622 if (translation != CONTEXT_TT_PASS_THROUGH) {
1623 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1624 translation = info ? CONTEXT_TT_DEV_IOTLB :
1625 CONTEXT_TT_MULTI_LEVEL;
1626 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001627 /*
1628 * In pass through mode, AW must be programmed to indicate the largest
1629 * AGAW value supported by hardware. And ASR is ignored by hardware.
1630 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001631 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001632 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001633 else {
1634 context_set_address_root(context, virt_to_phys(pgd));
1635 context_set_address_width(context, iommu->agaw);
1636 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001637
1638 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001639 context_set_fault_enable(context);
1640 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001641 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001642
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001643 /*
1644 * It's a non-present to present mapping. If hardware doesn't cache
1645	 * non-present entries, we only need to flush the write-buffer. If it
1646 * _does_ cache non-present entries, then it does so in the special
1647 * domain #0, which we have to flush:
1648 */
1649 if (cap_caching_mode(iommu->cap)) {
1650 iommu->flush.flush_context(iommu, 0,
1651 (((u16)bus) << 8) | devfn,
1652 DMA_CCMD_MASK_NOBIT,
1653 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001654 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001655 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001657 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001658 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001659 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001660
1661 spin_lock_irqsave(&domain->iommu_lock, flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08001662 if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08001663 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001664 if (domain->iommu_count == 1)
1665 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001666 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001667 }
1668 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001669 return 0;
1670}
1671
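/*
 * A context entry is programmed for the device itself and, when the
 * device sits behind a PCIe-to-PCI bridge, for each intervening bridge
 * and for the bridge's secondary bus (or the bridge itself, for a
 * legacy PCI bridge): DMA from conventional PCI devices is typically
 * tagged with the bridge's source-id rather than the device's own, so
 * those entries must resolve to the same domain.
 */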
1672static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001673domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1674 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001675{
1676 int ret;
1677 struct pci_dev *tmp, *parent;
1678
David Woodhouse276dbf92009-04-04 01:45:37 +01001679 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001680 pdev->bus->number, pdev->devfn,
1681 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001682 if (ret)
1683 return ret;
1684
1685 /* dependent device mapping */
1686 tmp = pci_find_upstream_pcie_bridge(pdev);
1687 if (!tmp)
1688 return 0;
1689 /* Secondary interface's bus number and devfn 0 */
1690 parent = pdev->bus->self;
1691 while (parent != tmp) {
David Woodhouse276dbf92009-04-04 01:45:37 +01001692 ret = domain_context_mapping_one(domain,
1693 pci_domain_nr(parent->bus),
1694 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001695 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001696 if (ret)
1697 return ret;
1698 parent = parent->bus->self;
1699 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001700 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001701 return domain_context_mapping_one(domain,
David Woodhouse276dbf92009-04-04 01:45:37 +01001702 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001703 tmp->subordinate->number, 0,
1704 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001705 else /* this is a legacy PCI bridge */
1706 return domain_context_mapping_one(domain,
David Woodhouse276dbf92009-04-04 01:45:37 +01001707 pci_domain_nr(tmp->bus),
1708 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001709 tmp->devfn,
1710 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711}
1712
Weidong Han5331fe62008-12-08 23:00:00 +08001713static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714{
1715 int ret;
1716 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001717 struct intel_iommu *iommu;
1718
David Woodhouse276dbf92009-04-04 01:45:37 +01001719 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1720 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001721 if (!iommu)
1722 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001723
David Woodhouse276dbf92009-04-04 01:45:37 +01001724 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725 if (!ret)
1726 return ret;
1727 /* dependent device mapping */
1728 tmp = pci_find_upstream_pcie_bridge(pdev);
1729 if (!tmp)
1730 return ret;
1731 /* Secondary interface's bus number and devfn 0 */
1732 parent = pdev->bus->self;
1733 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001734 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf92009-04-04 01:45:37 +01001735 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001736 if (!ret)
1737 return ret;
1738 parent = parent->bus->self;
1739 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001740 if (pci_is_pcie(tmp))
David Woodhouse276dbf92009-04-04 01:45:37 +01001741 return device_context_mapped(iommu, tmp->subordinate->number,
1742 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001743 else
David Woodhouse276dbf92009-04-04 01:45:37 +01001744 return device_context_mapped(iommu, tmp->bus->number,
1745 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001746}
1747
Fenghua Yuf5329592009-08-04 15:09:37 -07001748/* Returns a number of VTD pages, but aligned to MM page size */
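/*
 * For example, with 4KiB pages on both sides (PAGE_SHIFT ==
 * VTD_PAGE_SHIFT == 12): host_addr = 0x1234, size = 0x2000 keeps the
 * in-page offset 0x234, rounds 0x234 + 0x2000 up to 0x3000 and so
 * returns 3.
 */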
1749static inline unsigned long aligned_nrpages(unsigned long host_addr,
1750 size_t size)
1751{
1752 host_addr &= ~PAGE_MASK;
1753 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1754}
1755
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001756/* Return largest possible superpage level for a given mapping */
1757static inline int hardware_largepage_caps(struct dmar_domain *domain,
1758 unsigned long iov_pfn,
1759 unsigned long phy_pfn,
1760 unsigned long pages)
1761{
1762 int support, level = 1;
1763 unsigned long pfnmerge;
1764
1765 support = domain->iommu_superpage;
1766
1767 /* To use a large page, the virtual *and* physical addresses
1768 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1769 of them will mean we have to use smaller pages. So just
1770 merge them and check both at once. */
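	/*
	 * For instance, with the usual 9-bit stride: iov_pfn and phy_pfn
	 * both multiples of 512 and pages >= 512 yield level 2 (a 2MiB
	 * superpage); if either pfn has any of its low nine bits set,
	 * the loop below never gets past level 1.
	 */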
1771 pfnmerge = iov_pfn | phy_pfn;
1772
1773 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1774 pages >>= VTD_STRIDE_SHIFT;
1775 if (!pages)
1776 break;
1777 pfnmerge >>= VTD_STRIDE_SHIFT;
1778 level++;
1779 support--;
1780 }
1781 return level;
1782}
1783
David Woodhouse9051aa02009-06-29 12:30:54 +01001784static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1785 struct scatterlist *sg, unsigned long phys_pfn,
1786 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001787{
1788 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001789 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001790 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001791 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001792 unsigned int largepage_lvl = 0;
1793 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001794
1795 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1796
1797 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1798 return -EINVAL;
1799
1800 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1801
David Woodhouse9051aa02009-06-29 12:30:54 +01001802 if (sg)
1803 sg_res = 0;
1804 else {
1805 sg_res = nr_pages + 1;
1806 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1807 }
1808
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001809 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001810 uint64_t tmp;
1811
David Woodhousee1605492009-06-29 11:17:38 +01001812 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001813 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001814 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1815 sg->dma_length = sg->length;
1816 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001817 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001818 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001819
David Woodhousee1605492009-06-29 11:17:38 +01001820 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001821 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1822
1823 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001824 if (!pte)
1825 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001826		 /* It is a large page */
Woodhouse, David16639bc2012-12-19 13:25:35 +00001827 if (largepage_lvl > 1) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001828 pteval |= DMA_PTE_LARGE_PAGE;
Woodhouse, David16639bc2012-12-19 13:25:35 +00001829 /* Ensure that old small page tables are removed to make room
1830 for superpage, if they exist. */
1831 dma_pte_clear_range(domain, iov_pfn,
1832 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1833 dma_pte_free_pagetable(domain, iov_pfn,
1834 iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1835 } else {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001836 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
Woodhouse, David16639bc2012-12-19 13:25:35 +00001837 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001838
David Woodhousee1605492009-06-29 11:17:38 +01001839 }
1840		 /* We don't need a lock here; nobody else
1841 * touches the iova range
1842 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001843 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001844 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001845 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001846 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1847 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001848 if (dumps) {
1849 dumps--;
1850 debug_dma_dump_mappings(NULL);
1851 }
1852 WARN_ON(1);
1853 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001854
1855 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1856
1857 BUG_ON(nr_pages < lvl_pages);
1858 BUG_ON(sg_res < lvl_pages);
1859
1860 nr_pages -= lvl_pages;
1861 iov_pfn += lvl_pages;
1862 phys_pfn += lvl_pages;
1863 pteval += lvl_pages * VTD_PAGE_SIZE;
1864 sg_res -= lvl_pages;
1865
1866 /* If the next PTE would be the first in a new page, then we
1867 need to flush the cache on the entries we've just written.
1868 And then we'll need to recalculate 'pte', so clear it and
1869 let it get set again in the if (!pte) block above.
1870
1871 If we're done (!nr_pages) we need to flush the cache too.
1872
1873 Also if we've been setting superpages, we may need to
1874 recalculate 'pte' and switch back to smaller pages for the
1875 end of the mapping, if the trailing size is not enough to
1876 use another superpage (i.e. sg_res < lvl_pages). */
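		/*
		 * Example (assuming 2MiB-aligned iov/phys pfns): with
		 * lvl_pages == 512 and 700 pages left in this sg entry,
		 * the first pass writes one large PTE; the remaining 188
		 * pages fall back to 4KiB PTEs on later passes.
		 */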
David Woodhousee1605492009-06-29 11:17:38 +01001877 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001878 if (!nr_pages || first_pte_in_page(pte) ||
1879 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001880 domain_flush_cache(domain, first_pte,
1881 (void *)pte - (void *)first_pte);
1882 pte = NULL;
1883 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001884
1885 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001886 sg = sg_next(sg);
1887 }
1888 return 0;
1889}
1890
David Woodhouse9051aa02009-06-29 12:30:54 +01001891static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1892 struct scatterlist *sg, unsigned long nr_pages,
1893 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001894{
David Woodhouse9051aa02009-06-29 12:30:54 +01001895 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1896}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001897
David Woodhouse9051aa02009-06-29 12:30:54 +01001898static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1899 unsigned long phys_pfn, unsigned long nr_pages,
1900 int prot)
1901{
1902 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001903}
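/*
 * Typical use, as in iommu_domain_identity_map() further down: a 1:1
 * range is established with
 *	domain_pfn_mapping(domain, pfn, pfn, nr_pages,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 */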
1904
Weidong Hanc7151a82008-12-08 22:51:37 +08001905static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906{
Weidong Hanc7151a82008-12-08 22:51:37 +08001907 if (!iommu)
1908 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001909
1910 clear_context_table(iommu, bus, devfn);
1911 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001912 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001913 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001914}
1915
1916static void domain_remove_dev_info(struct dmar_domain *domain)
1917{
1918 struct device_domain_info *info;
1919 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001920 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001921
1922 spin_lock_irqsave(&device_domain_lock, flags);
1923 while (!list_empty(&domain->devices)) {
1924 info = list_entry(domain->devices.next,
1925 struct device_domain_info, link);
1926 list_del(&info->link);
1927 list_del(&info->global);
1928 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001929 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001930 spin_unlock_irqrestore(&device_domain_lock, flags);
1931
Yu Zhao93a23a72009-05-18 13:51:37 +08001932 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf92009-04-04 01:45:37 +01001933 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001934 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001935 free_devinfo_mem(info);
1936
1937 spin_lock_irqsave(&device_domain_lock, flags);
1938 }
1939 spin_unlock_irqrestore(&device_domain_lock, flags);
1940}
1941
1942/*
1943 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001944 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945 */
Kay, Allen M38717942008-09-09 18:37:29 +03001946static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001947find_domain(struct pci_dev *pdev)
1948{
1949 struct device_domain_info *info;
1950
1951 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001952 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001953 if (info)
1954 return info->domain;
1955 return NULL;
1956}
1957
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001958/* domain is initialized */
1959static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1960{
1961 struct dmar_domain *domain, *found = NULL;
1962 struct intel_iommu *iommu;
1963 struct dmar_drhd_unit *drhd;
1964 struct device_domain_info *info, *tmp;
1965 struct pci_dev *dev_tmp;
1966 unsigned long flags;
1967 int bus = 0, devfn = 0;
David Woodhouse276dbf92009-04-04 01:45:37 +01001968 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001969 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001970
1971 domain = find_domain(pdev);
1972 if (domain)
1973 return domain;
1974
David Woodhouse276dbf92009-04-04 01:45:37 +01001975 segment = pci_domain_nr(pdev->bus);
1976
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001977 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1978 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001979 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001980 bus = dev_tmp->subordinate->number;
1981 devfn = 0;
1982 } else {
1983 bus = dev_tmp->bus->number;
1984 devfn = dev_tmp->devfn;
1985 }
1986 spin_lock_irqsave(&device_domain_lock, flags);
1987 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf92009-04-04 01:45:37 +01001988 if (info->segment == segment &&
1989 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001990 found = info->domain;
1991 break;
1992 }
1993 }
1994 spin_unlock_irqrestore(&device_domain_lock, flags);
1995		 /* pcie-pci bridge already has a domain, use it */
1996 if (found) {
1997 domain = found;
1998 goto found_domain;
1999 }
2000 }
2001
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002002 domain = alloc_domain();
2003 if (!domain)
2004 goto error;
2005
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002006 /* Allocate new domain for the device */
2007 drhd = dmar_find_matched_drhd_unit(pdev);
2008 if (!drhd) {
2009 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2010 pci_name(pdev));
2011 return NULL;
2012 }
2013 iommu = drhd->iommu;
2014
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002015 ret = iommu_attach_domain(domain, iommu);
2016 if (ret) {
Alex Williamson2fe97232011-03-04 14:52:30 -07002017 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002018 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002019 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002020
2021 if (domain_init(domain, gaw)) {
2022 domain_exit(domain);
2023 goto error;
2024 }
2025
2026 /* register pcie-to-pci device */
2027 if (dev_tmp) {
2028 info = alloc_devinfo_mem();
2029 if (!info) {
2030 domain_exit(domain);
2031 goto error;
2032 }
David Woodhouse276dbf92009-04-04 01:45:37 +01002033 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002034 info->bus = bus;
2035 info->devfn = devfn;
2036 info->dev = NULL;
2037 info->domain = domain;
2038		 /* This domain is shared by devices under a p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002039 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002040
2041		 /* pcie-to-pci bridge already has a domain, use it */
2042 found = NULL;
2043 spin_lock_irqsave(&device_domain_lock, flags);
2044 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf92009-04-04 01:45:37 +01002045 if (tmp->segment == segment &&
2046 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002047 found = tmp->domain;
2048 break;
2049 }
2050 }
2051 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002052 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002053 free_devinfo_mem(info);
2054 domain_exit(domain);
2055 domain = found;
2056 } else {
2057 list_add(&info->link, &domain->devices);
2058 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002059 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002060 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002061 }
2062
2063found_domain:
2064 info = alloc_devinfo_mem();
2065 if (!info)
2066 goto error;
David Woodhouse276dbf92009-04-04 01:45:37 +01002067 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002068 info->bus = pdev->bus->number;
2069 info->devfn = pdev->devfn;
2070 info->dev = pdev;
2071 info->domain = domain;
2072 spin_lock_irqsave(&device_domain_lock, flags);
2073	 /* somebody else attached a domain first; use theirs */
2074 found = find_domain(pdev);
2075 if (found != NULL) {
2076 spin_unlock_irqrestore(&device_domain_lock, flags);
2077 if (found != domain) {
2078 domain_exit(domain);
2079 domain = found;
2080 }
2081 free_devinfo_mem(info);
2082 return domain;
2083 }
2084 list_add(&info->link, &domain->devices);
2085 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002086 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002087 spin_unlock_irqrestore(&device_domain_lock, flags);
2088 return domain;
2089error:
2090 /* recheck it here, maybe others set it */
2091 return find_domain(pdev);
2092}
2093
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002094static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002095#define IDENTMAP_ALL 1
2096#define IDENTMAP_GFX 2
2097#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002098
David Woodhouseb2132032009-06-26 18:50:28 +01002099static int iommu_domain_identity_map(struct dmar_domain *domain,
2100 unsigned long long start,
2101 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002102{
David Woodhousec5395d52009-06-28 16:35:56 +01002103 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2104 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002105
David Woodhousec5395d52009-06-28 16:35:56 +01002106 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2107 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002108 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002109 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002110 }
2111
David Woodhousec5395d52009-06-28 16:35:56 +01002112 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2113 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002114 /*
2115 * RMRR range might have overlap with physical memory range,
2116 * clear it first
2117 */
David Woodhousec5395d52009-06-28 16:35:56 +01002118 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002119
David Woodhousec5395d52009-06-28 16:35:56 +01002120 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2121 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002122 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002123}
2124
2125static int iommu_prepare_identity_map(struct pci_dev *pdev,
2126 unsigned long long start,
2127 unsigned long long end)
2128{
2129 struct dmar_domain *domain;
2130 int ret;
2131
David Woodhousec7ab48d2009-06-26 19:10:36 +01002132 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002133 if (!domain)
2134 return -ENOMEM;
2135
David Woodhouse19943b02009-08-04 16:19:20 +01002136 /* For _hardware_ passthrough, don't bother. But for software
2137 passthrough, we do it anyway -- it may indicate a memory
2138	 range which is reserved in E820, and so didn't get set
2139 up to start with in si_domain */
2140 if (domain == si_domain && hw_pass_through) {
2141 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2142 pci_name(pdev), start, end);
2143 return 0;
2144 }
2145
2146 printk(KERN_INFO
2147 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2148 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002149
David Woodhouse5595b522009-12-02 09:21:55 +00002150 if (end < start) {
2151 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2152 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2153 dmi_get_system_info(DMI_BIOS_VENDOR),
2154 dmi_get_system_info(DMI_BIOS_VERSION),
2155 dmi_get_system_info(DMI_PRODUCT_VERSION));
2156 ret = -EIO;
2157 goto error;
2158 }
2159
David Woodhouse2ff729f2009-08-26 14:25:41 +01002160 if (end >> agaw_to_width(domain->agaw)) {
2161 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2162 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2163 agaw_to_width(domain->agaw),
2164 dmi_get_system_info(DMI_BIOS_VENDOR),
2165 dmi_get_system_info(DMI_BIOS_VERSION),
2166 dmi_get_system_info(DMI_PRODUCT_VERSION));
2167 ret = -EIO;
2168 goto error;
2169 }
David Woodhouse19943b02009-08-04 16:19:20 +01002170
David Woodhouseb2132032009-06-26 18:50:28 +01002171 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002172 if (ret)
2173 goto error;
2174
2175 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002176 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002177 if (ret)
2178 goto error;
2179
2180 return 0;
2181
2182 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002183 domain_exit(domain);
2184 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002185}
2186
2187static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2188 struct pci_dev *pdev)
2189{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002190 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002191 return 0;
2192 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002193 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002194}
2195
Suresh Siddhad3f13812011-08-23 17:05:25 -07002196#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002197static inline void iommu_prepare_isa(void)
2198{
2199 struct pci_dev *pdev;
2200 int ret;
2201
2202 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2203 if (!pdev)
2204 return;
2205
David Woodhousec7ab48d2009-06-26 19:10:36 +01002206 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002207 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002208
2209 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002210 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2211 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002212
2213}
2214#else
2215static inline void iommu_prepare_isa(void)
2216{
2217 return;
2218}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002219#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002220
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002221static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002222
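/*
 * si_domain is the shared "static identity" domain: every device that
 * qualifies for identity mapping gets attached to it. With hardware
 * pass-through (hw != 0) no page tables are needed; otherwise the loop
 * below maps each online node's memory ranges 1:1.
 */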
Matt Kraai071e1372009-08-23 22:30:22 -07002223static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002224{
2225 struct dmar_drhd_unit *drhd;
2226 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002227 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002228
2229 si_domain = alloc_domain();
2230 if (!si_domain)
2231 return -EFAULT;
2232
David Woodhousec7ab48d2009-06-26 19:10:36 +01002233 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002234
2235 for_each_active_iommu(iommu, drhd) {
2236 ret = iommu_attach_domain(si_domain, iommu);
2237 if (ret) {
2238 domain_exit(si_domain);
2239 return -EFAULT;
2240 }
2241 }
2242
2243 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2244 domain_exit(si_domain);
2245 return -EFAULT;
2246 }
2247
2248 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2249
David Woodhouse19943b02009-08-04 16:19:20 +01002250 if (hw)
2251 return 0;
2252
David Woodhousec7ab48d2009-06-26 19:10:36 +01002253 for_each_online_node(nid) {
Tejun Heod4bbf7e2011-11-28 09:46:22 -08002254 unsigned long start_pfn, end_pfn;
2255 int i;
2256
2257 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2258 ret = iommu_domain_identity_map(si_domain,
2259 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2260 if (ret)
2261 return ret;
2262 }
David Woodhousec7ab48d2009-06-26 19:10:36 +01002263 }
2264
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002265 return 0;
2266}
2267
2268static void domain_remove_one_dev_info(struct dmar_domain *domain,
2269 struct pci_dev *pdev);
2270static int identity_mapping(struct pci_dev *pdev)
2271{
2272 struct device_domain_info *info;
2273
2274 if (likely(!iommu_identity_mapping))
2275 return 0;
2276
Mike Traviscb452a42011-05-28 13:15:03 -05002277 info = pdev->dev.archdata.iommu;
2278 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2279 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002280
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002281 return 0;
2282}
2283
2284static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002285 struct pci_dev *pdev,
2286 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002287{
2288 struct device_domain_info *info;
2289 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002290 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002291
2292 info = alloc_devinfo_mem();
2293 if (!info)
2294 return -ENOMEM;
2295
2296 info->segment = pci_domain_nr(pdev->bus);
2297 info->bus = pdev->bus->number;
2298 info->devfn = pdev->devfn;
2299 info->dev = pdev;
2300 info->domain = domain;
2301
2302 spin_lock_irqsave(&device_domain_lock, flags);
2303 list_add(&info->link, &domain->devices);
2304 list_add(&info->global, &device_domain_list);
2305 pdev->dev.archdata.iommu = info;
2306 spin_unlock_irqrestore(&device_domain_lock, flags);
2307
David Woodhousee568e5e2012-05-25 17:42:54 +01002308 ret = domain_context_mapping(domain, pdev, translation);
2309 if (ret) {
2310 spin_lock_irqsave(&device_domain_lock, flags);
2311 list_del(&info->link);
2312 list_del(&info->global);
2313 pdev->dev.archdata.iommu = NULL;
2314 spin_unlock_irqrestore(&device_domain_lock, flags);
2315 free_devinfo_mem(info);
2316 return ret;
2317 }
2318
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002319 return 0;
2320}
2321
Tom Mingarelli5ad78132012-11-20 19:43:17 +00002322static bool device_has_rmrr(struct pci_dev *dev)
2323{
2324 struct dmar_rmrr_unit *rmrr;
2325 int i;
2326
2327 for_each_rmrr_units(rmrr) {
2328 for (i = 0; i < rmrr->devices_cnt; i++) {
2329 /*
2330 * Return TRUE if this RMRR contains the device that
2331 * is passed in.
2332 */
2333 if (rmrr->devices[i] == dev)
2334 return true;
2335 }
2336 }
2337 return false;
2338}
2339
David Woodhouse6941af22009-07-04 18:24:27 +01002340static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2341{
Tom Mingarelli5ad78132012-11-20 19:43:17 +00002342
2343 /*
2344 * We want to prevent any device associated with an RMRR from
2345 * getting placed into the SI Domain. This is done because
2346 * problems exist when devices are moved in and out of domains
2347 * and their respective RMRR info is lost. We exempt USB devices
2348 * from this process due to their usage of RMRRs that are known
2349	 * not to be needed after the BIOS hands off to the OS.
2350 */
2351 if (device_has_rmrr(pdev) &&
2352 (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2353 return 0;
2354
David Woodhousee0fc7e02009-09-30 09:12:17 -07002355 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2356 return 1;
2357
2358 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2359 return 1;
2360
2361 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2362 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002363
David Woodhouse3dfc8132009-07-04 19:11:08 +01002364 /*
2365 * We want to start off with all devices in the 1:1 domain, and
2366 * take them out later if we find they can't access all of memory.
2367 *
2368 * However, we can't do this for PCI devices behind bridges,
2369 * because all PCI devices behind the same bridge will end up
2370 * with the same source-id on their transactions.
2371 *
2372 * Practically speaking, we can't change things around for these
2373 * devices at run-time, because we can't be sure there'll be no
2374 * DMA transactions in flight for any of their siblings.
2375 *
2376 * So PCI devices (unless they're on the root bus) as well as
2377 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2378 * the 1:1 domain, just in _case_ one of their siblings turns out
2379 * not to be able to map all of memory.
2380 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002381 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002382 if (!pci_is_root_bus(pdev->bus))
2383 return 0;
2384 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2385 return 0;
2386 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2387 return 0;
2388
2389 /*
2390 * At boot time, we don't yet know if devices will be 64-bit capable.
2391 * Assume that they will -- if they turn out not to be, then we can
2392 * take them out of the 1:1 domain later.
2393 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002394 if (!startup) {
2395 /*
2396 * If the device's dma_mask is less than the system's memory
2397 * size then this is not a candidate for identity mapping.
2398 */
2399 u64 dma_mask = pdev->dma_mask;
2400
2401 if (pdev->dev.coherent_dma_mask &&
2402 pdev->dev.coherent_dma_mask < dma_mask)
2403 dma_mask = pdev->dev.coherent_dma_mask;
2404
2405 return dma_mask >= dma_get_required_mask(&pdev->dev);
2406 }
David Woodhouse6941af22009-07-04 18:24:27 +01002407
2408 return 1;
2409}
2410
Matt Kraai071e1372009-08-23 22:30:22 -07002411static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002412{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002413 struct pci_dev *pdev = NULL;
2414 int ret;
2415
David Woodhouse19943b02009-08-04 16:19:20 +01002416 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002417 if (ret)
2418 return -EFAULT;
2419
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002420 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002421 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse5fe60f42009-08-09 10:53:41 +01002422 ret = domain_add_dev_info(si_domain, pdev,
Mike Traviseae460b2012-03-05 15:05:16 -08002423 hw ? CONTEXT_TT_PASS_THROUGH :
2424 CONTEXT_TT_MULTI_LEVEL);
2425 if (ret) {
2426 /* device not associated with an iommu */
2427 if (ret == -ENODEV)
2428 continue;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002429 return ret;
Mike Traviseae460b2012-03-05 15:05:16 -08002430 }
2431 pr_info("IOMMU: %s identity mapping for device %s\n",
2432 hw ? "hardware" : "software", pci_name(pdev));
David Woodhouse62edf5d2009-07-04 10:59:46 +01002433 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002434 }
2435
2436 return 0;
2437}
2438
Joseph Cihulab7792602011-05-03 00:08:37 -07002439static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002440{
2441 struct dmar_drhd_unit *drhd;
2442 struct dmar_rmrr_unit *rmrr;
2443 struct pci_dev *pdev;
2444 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002445 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002446
2447 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002448 * for each drhd
2449 * allocate root
2450 * initialize and program root entry to not present
2451 * endfor
2452 */
2453 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002454 /*
2455		 * lock not needed as this is only incremented in the single-
2456		 * threaded kernel __init code path; all other accesses are read
2457		 * only
2458 */
Mike Travis1b198bb2012-03-05 15:05:16 -08002459 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2460 g_num_of_iommus++;
2461 continue;
2462 }
2463 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2464 IOMMU_UNITS_SUPPORTED);
mark gross5e0d2a62008-03-04 15:22:08 -08002465 }
2466
Weidong Hand9630fe2008-12-08 11:06:32 +08002467 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2468 GFP_KERNEL);
2469 if (!g_iommus) {
2470 printk(KERN_ERR "Allocating global iommu array failed\n");
2471 ret = -ENOMEM;
2472 goto error;
2473 }
2474
mark gross80b20dd2008-04-18 13:53:58 -07002475 deferred_flush = kzalloc(g_num_of_iommus *
2476 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2477 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002478 ret = -ENOMEM;
2479 goto error;
2480 }
2481
mark gross5e0d2a62008-03-04 15:22:08 -08002482 for_each_drhd_unit(drhd) {
2483 if (drhd->ignored)
2484 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002485
2486 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002487 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002488
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002489 ret = iommu_init_domains(iommu);
2490 if (ret)
2491 goto error;
2492
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002493 /*
2494 * TBD:
2495 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002496		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002497 */
2498 ret = iommu_alloc_root_entry(iommu);
2499 if (ret) {
2500 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2501 goto error;
2502 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002503 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002504 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002505 }
2506
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002507 /*
2508	 * Start from a sane iommu hardware state.
2509 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002510 for_each_drhd_unit(drhd) {
2511 if (drhd->ignored)
2512 continue;
2513
2514 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002515
2516 /*
2517 * If the queued invalidation is already initialized by us
2518 * (for example, while enabling interrupt-remapping) then
2519 * we got the things already rolling from a sane state.
2520 */
2521 if (iommu->qi)
2522 continue;
2523
2524 /*
2525 * Clear any previous faults.
2526 */
2527 dmar_fault(-1, iommu);
2528 /*
2529 * Disable queued invalidation if supported and already enabled
2530 * before OS handover.
2531 */
2532 dmar_disable_qi(iommu);
2533 }
2534
2535 for_each_drhd_unit(drhd) {
2536 if (drhd->ignored)
2537 continue;
2538
2539 iommu = drhd->iommu;
2540
Youquan Songa77b67d2008-10-16 16:31:56 -07002541 if (dmar_enable_qi(iommu)) {
2542 /*
2543 * Queued Invalidate not enabled, use Register Based
2544 * Invalidate
2545 */
2546 iommu->flush.flush_context = __iommu_flush_context;
2547 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002548 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002549 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002550 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002551 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002552 } else {
2553 iommu->flush.flush_context = qi_flush_context;
2554 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002555 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002556 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002557 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002558 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002559 }
2560 }
2561
David Woodhouse19943b02009-08-04 16:19:20 +01002562 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002563 iommu_identity_mapping |= IDENTMAP_ALL;
2564
Suresh Siddhad3f13812011-08-23 17:05:25 -07002565#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002566 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002567#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002568
2569 check_tylersburg_isoch();
2570
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002571 /*
2572	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002573	 * identity mappings for rmrr, gfx, and isa, and possibly fall back to static
2574 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002575 */
David Woodhouse19943b02009-08-04 16:19:20 +01002576 if (iommu_identity_mapping) {
2577 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2578 if (ret) {
2579 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2580 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002581 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002582 }
David Woodhouse19943b02009-08-04 16:19:20 +01002583 /*
2584 * For each rmrr
2585 * for each dev attached to rmrr
2586 * do
2587 * locate drhd for dev, alloc domain for dev
2588 * allocate free domain
2589 * allocate page table entries for rmrr
2590 * if context not allocated for bus
2591 * allocate and init context
2592 * set present in root table for this bus
2593 * init context with domain, translation etc
2594 * endfor
2595 * endfor
2596 */
2597 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2598 for_each_rmrr_units(rmrr) {
2599 for (i = 0; i < rmrr->devices_cnt; i++) {
2600 pdev = rmrr->devices[i];
2601 /*
2602			 * some BIOSes list non-existent devices in the DMAR
2603 * table.
2604 */
2605 if (!pdev)
2606 continue;
2607 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2608 if (ret)
2609 printk(KERN_ERR
2610 "IOMMU: mapping reserved region failed\n");
2611 }
2612 }
2613
2614 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002615
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002616 /*
2617 * for each drhd
2618 * enable fault log
2619 * global invalidate context cache
2620 * global invalidate iotlb
2621 * enable translation
2622 */
2623 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002624 if (drhd->ignored) {
2625 /*
2626 * we always have to disable PMRs or DMA may fail on
2627 * this device
2628 */
2629 if (force_on)
2630 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002631 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002632 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002633 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002634
2635 iommu_flush_write_buffer(iommu);
2636
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002637 ret = dmar_set_interrupt(iommu);
2638 if (ret)
2639 goto error;
2640
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002641 iommu_set_root_entry(iommu);
2642
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002643 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002644 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002645
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002646 ret = iommu_enable_translation(iommu);
2647 if (ret)
2648 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002649
2650 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002651 }
2652
2653 return 0;
2654error:
2655 for_each_drhd_unit(drhd) {
2656 if (drhd->ignored)
2657 continue;
2658 iommu = drhd->iommu;
2659 free_iommu(iommu);
2660 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002661 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002662 return ret;
2663}
2664
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002665/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002666static struct iova *intel_alloc_iova(struct device *dev,
2667 struct dmar_domain *domain,
2668 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002669{
2670 struct pci_dev *pdev = to_pci_dev(dev);
2671 struct iova *iova = NULL;
2672
David Woodhouse875764d2009-06-28 21:20:51 +01002673 /* Restrict dma_mask to the width that the iommu can handle */
2674 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2675
2676 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002677 /*
2678 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002679 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002680 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002681 */
David Woodhouse875764d2009-06-28 21:20:51 +01002682 iova = alloc_iova(&domain->iovad, nrpages,
2683 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2684 if (iova)
2685 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002686 }
David Woodhouse875764d2009-06-28 21:20:51 +01002687 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2688 if (unlikely(!iova)) {
2689		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2690 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002691 return NULL;
2692 }
2693
2694 return iova;
2695}
2696
David Woodhouse147202a2009-07-07 19:43:20 +01002697static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002698{
2699 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002700 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002701
2702 domain = get_domain_for_dev(pdev,
2703 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2704 if (!domain) {
2705 printk(KERN_ERR
2706			"Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002707 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002708 }
2709
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002710 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002711 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002712 ret = domain_context_mapping(domain, pdev,
2713 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002714 if (ret) {
2715 printk(KERN_ERR
2716				"Domain context map for %s failed\n",
2717 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002718 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002719 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002720 }
2721
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002722 return domain;
2723}
2724
David Woodhouse147202a2009-07-07 19:43:20 +01002725static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2726{
2727 struct device_domain_info *info;
2728
2729 /* No lock here, assumes no domain exit in normal case */
2730 info = dev->dev.archdata.iommu;
2731 if (likely(info))
2732 return info->domain;
2733
2734 return __get_valid_domain_for_dev(dev);
2735}
2736
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002737static int iommu_dummy(struct pci_dev *pdev)
2738{
2739 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2740}
2741
2742/* Check if the pdev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002743static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002744{
David Woodhouse73676832009-07-04 14:08:36 +01002745 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002746 int found;
2747
David Woodhouse73676832009-07-04 14:08:36 +01002748 if (unlikely(dev->bus != &pci_bus_type))
2749 return 1;
2750
2751 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002752 if (iommu_dummy(pdev))
2753 return 1;
2754
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002756 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002757
2758 found = identity_mapping(pdev);
2759 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002760 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002761 return 1;
2762 else {
2763 /*
2764			 * 32-bit DMA devices are removed from si_domain and
2765			 * fall back to non-identity mapping.
2766 */
2767 domain_remove_one_dev_info(si_domain, pdev);
2768 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2769 pci_name(pdev));
2770 return 0;
2771 }
2772 } else {
2773 /*
2774		 * If a 64-bit DMA device has been detached from a VM, the
2775		 * device is put back into si_domain for identity mapping.
2776 */
David Woodhouse6941af22009-07-04 18:24:27 +01002777 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002778 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002779 ret = domain_add_dev_info(si_domain, pdev,
2780 hw_pass_through ?
2781 CONTEXT_TT_PASS_THROUGH :
2782 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002783 if (!ret) {
2784 printk(KERN_INFO "64bit %s uses identity mapping\n",
2785 pci_name(pdev));
2786 return 1;
2787 }
2788 }
2789 }
2790
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002791 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002792}
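
/*
 * Descriptive note (added comment, not in the original source): the helper
 * above decides whether DMA for a device is left untranslated.  It returns 1
 * for non-PCI devices, for devices quirked with DUMMY_DEVICE_DOMAIN_INFO and
 * for devices that are (or have just been moved into) the static identity
 * domain; returning 0 sends the caller down the normal IOVA-remapping path.
 * As a side effect it migrates devices into or out of si_domain whenever
 * iommu_should_identity_map() says their eligibility has changed.
 */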
2793
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002794static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2795 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002796{
2797 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002798 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002799 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002800 struct iova *iova;
2801 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002802 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002803 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002804 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002805
2806 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002807
David Woodhouse73676832009-07-04 14:08:36 +01002808 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002809 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002810
2811 domain = get_valid_domain_for_dev(pdev);
2812 if (!domain)
2813 return 0;
2814
Weidong Han8c11e792008-12-08 15:29:22 +08002815 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002816 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002817
Mike Travisc681d0b2011-05-28 13:15:05 -05002818 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002819 if (!iova)
2820 goto error;
2821
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002822 /*
2823	 * Check if DMAR supports zero-length reads on write-only
2824	 * mappings.
2825 */
2826 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002827 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002828 prot |= DMA_PTE_READ;
2829 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2830 prot |= DMA_PTE_WRITE;
2831 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002832	 * paddr .. (paddr + size) might span a partial page, so we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002833	 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002834	 * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002835	 * is not a big problem.
2836 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002837 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002838 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002839 if (ret)
2840 goto error;
2841
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002842	/* It's a non-present to present mapping. Only flush the IOTLB if in caching mode */
2843 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002844 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002845 else
Weidong Han8c11e792008-12-08 15:29:22 +08002846 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002847
David Woodhouse03d6a242009-06-28 15:33:46 +01002848 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2849 start_paddr += paddr & ~PAGE_MASK;
2850 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002851
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002852error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002853 if (iova)
2854 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002855	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002856 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002857 return 0;
2858}
2859
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002860static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2861 unsigned long offset, size_t size,
2862 enum dma_data_direction dir,
2863 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002864{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002865 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2866 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002867}
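
/*
 * Illustrative sketch (added comment, not part of the driver): with
 * intel_dma_ops installed, a PCI driver reaches intel_map_page() through the
 * generic streaming DMA API.  "pdev" and "page" below are hypothetical
 * placeholders:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	...program the device with "handle", then...
 *	dma_unmap_page(&pdev->dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */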
2868
mark gross5e0d2a62008-03-04 15:22:08 -08002869static void flush_unmaps(void)
2870{
mark gross80b20dd2008-04-18 13:53:58 -07002871 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002872
mark gross5e0d2a62008-03-04 15:22:08 -08002873 timer_on = 0;
2874
2875 /* just flush them all */
2876 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002877 struct intel_iommu *iommu = g_iommus[i];
2878 if (!iommu)
2879 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002880
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002881 if (!deferred_flush[i].next)
2882 continue;
2883
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002884		/* In caching mode, global flushes make emulation expensive */
2885 if (!cap_caching_mode(iommu->cap))
2886 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002887 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002888 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002889 unsigned long mask;
2890 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002891 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002892
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002893 /* On real hardware multiple invalidations are expensive */
2894 if (cap_caching_mode(iommu->cap))
2895 iommu_flush_iotlb_psi(iommu, domain->id,
2896 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2897 else {
2898 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2899 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2900 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2901 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002902 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002903 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002904 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002905 }
2906
mark gross5e0d2a62008-03-04 15:22:08 -08002907 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002908}
2909
2910static void flush_unmaps_timeout(unsigned long data)
2911{
mark gross80b20dd2008-04-18 13:53:58 -07002912 unsigned long flags;
2913
2914 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002915 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002916 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002917}
2918
2919static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2920{
2921 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002922 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002923 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002924
2925 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002926 if (list_size == HIGH_WATER_MARK)
2927 flush_unmaps();
2928
Weidong Han8c11e792008-12-08 15:29:22 +08002929 iommu = domain_get_iommu(dom);
2930 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002931
mark gross80b20dd2008-04-18 13:53:58 -07002932 next = deferred_flush[iommu_id].next;
2933 deferred_flush[iommu_id].domain[next] = dom;
2934 deferred_flush[iommu_id].iova[next] = iova;
2935 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002936
2937 if (!timer_on) {
2938 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2939 timer_on = 1;
2940 }
2941 list_size++;
2942 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2943}
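
/*
 * Descriptive note (added comment): in the default non-strict mode the unmap
 * paths do not flush the IOTLB for every single unmap.  add_unmap() parks the
 * IOVA in the per-IOMMU deferred_flush[] table, and flush_unmaps() later
 * performs one global invalidation (or, in caching mode, a per-domain PSI)
 * for the whole batch -- triggered either by the list reaching
 * HIGH_WATER_MARK entries or by the 10ms unmap_timer.  Booting with
 * intel_iommu=strict (intel_iommu_strict) skips this batching entirely.
 */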
2944
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002945static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2946 size_t size, enum dma_data_direction dir,
2947 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002948{
2949 struct pci_dev *pdev = to_pci_dev(dev);
2950 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002951 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002952 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002953 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002954
David Woodhouse73676832009-07-04 14:08:36 +01002955 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002956 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002957
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002958 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002959 BUG_ON(!domain);
2960
Weidong Han8c11e792008-12-08 15:29:22 +08002961 iommu = domain_get_iommu(domain);
2962
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002963 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002964 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2965 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002966 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002967
David Woodhoused794dc92009-06-28 00:27:49 +01002968 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2969 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002970
David Woodhoused794dc92009-06-28 00:27:49 +01002971 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2972 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002973
2974 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002975 dma_pte_clear_range(domain, start_pfn, last_pfn);
2976
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002977 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002978 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2979
mark gross5e0d2a62008-03-04 15:22:08 -08002980 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002981 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002982 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002983 /* free iova */
2984 __free_iova(&domain->iovad, iova);
2985 } else {
2986 add_unmap(domain, iova);
2987 /*
2988		 * queue up the release of the unmap to save the roughly 1/6 of
2989		 * the CPU time used up by the iotlb flush operation...
2990 */
mark gross5e0d2a62008-03-04 15:22:08 -08002991 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002992}
2993
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002994static void *intel_alloc_coherent(struct device *hwdev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002995 dma_addr_t *dma_handle, gfp_t flags,
2996 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002997{
2998 void *vaddr;
2999 int order;
3000
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003001 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003002 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07003003
3004 if (!iommu_no_mapping(hwdev))
3005 flags &= ~(GFP_DMA | GFP_DMA32);
3006 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3007 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3008 flags |= GFP_DMA;
3009 else
3010 flags |= GFP_DMA32;
3011 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003012
3013 vaddr = (void *)__get_free_pages(flags, order);
3014 if (!vaddr)
3015 return NULL;
3016 memset(vaddr, 0, size);
3017
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09003018 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3019 DMA_BIDIRECTIONAL,
3020 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003021 if (*dma_handle)
3022 return vaddr;
3023 free_pages((unsigned long)vaddr, order);
3024 return NULL;
3025}
3026
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003027static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003028 dma_addr_t dma_handle, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003029{
3030 int order;
3031
Fenghua Yu5b6985c2008-10-16 18:02:32 -07003032 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003033 order = get_order(size);
3034
David Woodhouse0db9b7a2009-07-14 02:01:57 +01003035 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003036 free_pages((unsigned long)vaddr, order);
3037}
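
/*
 * Illustrative sketch (added comment, not part of the driver): the two
 * handlers above back the coherent DMA API, so a hypothetical driver would
 * typically use them as follows ("pdev" and "size" are placeholders):
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
 */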
3038
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003039static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3040 int nelems, enum dma_data_direction dir,
3041 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003042{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003043 struct pci_dev *pdev = to_pci_dev(hwdev);
3044 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01003045 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003046 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08003047 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003048
David Woodhouse73676832009-07-04 14:08:36 +01003049 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050 return;
3051
3052 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003053 BUG_ON(!domain);
3054
3055 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003056
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003057 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003058 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3059 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003060 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003061
David Woodhoused794dc92009-06-28 00:27:49 +01003062 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3063 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003064
3065 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003066 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003067
David Woodhoused794dc92009-06-28 00:27:49 +01003068 /* free page tables */
3069 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3070
David Woodhouseacea0012009-07-14 01:55:11 +01003071 if (intel_iommu_strict) {
3072 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003073 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003074 /* free iova */
3075 __free_iova(&domain->iovad, iova);
3076 } else {
3077 add_unmap(domain, iova);
3078 /*
3079		 * queue up the release of the unmap to save the roughly 1/6 of
3080		 * the CPU time used up by the iotlb flush operation...
3081 */
3082 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003083}
3084
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003085static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003086 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003087{
3088 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003089 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003090
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003091 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003092 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003093 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003094 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003095 }
3096 return nelems;
3097}
3098
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003099static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3100 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003101{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003102 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003103 struct pci_dev *pdev = to_pci_dev(hwdev);
3104 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003105 size_t size = 0;
3106 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003107 struct iova *iova = NULL;
3108 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003109 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003110 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003111 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003112
3113 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003114 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003115 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003116
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003117 domain = get_valid_domain_for_dev(pdev);
3118 if (!domain)
3119 return 0;
3120
Weidong Han8c11e792008-12-08 15:29:22 +08003121 iommu = domain_get_iommu(domain);
3122
David Woodhouseb536d242009-06-28 14:49:31 +01003123 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003124 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003125
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003126 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3127 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003128 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003129 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003130 return 0;
3131 }
3132
3133 /*
3134	 * Check if DMAR supports zero-length reads on write-only
3135	 * mappings.
3136 */
3137 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003138 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003139 prot |= DMA_PTE_READ;
3140 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3141 prot |= DMA_PTE_WRITE;
3142
David Woodhouseb536d242009-06-28 14:49:31 +01003143 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003144
Fenghua Yuf5329592009-08-04 15:09:37 -07003145 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003146 if (unlikely(ret)) {
3147 /* clear the page */
3148 dma_pte_clear_range(domain, start_vpfn,
3149 start_vpfn + size - 1);
3150 /* free page tables */
3151 dma_pte_free_pagetable(domain, start_vpfn,
3152 start_vpfn + size - 1);
3153 /* free iova */
3154 __free_iova(&domain->iovad, iova);
3155 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003156 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003157
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003158	/* It's a non-present to present mapping. Only flush the IOTLB if in caching mode */
3159 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003160 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003161 else
Weidong Han8c11e792008-12-08 15:29:22 +08003162 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003163
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003164 return nelems;
3165}
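
/*
 * Illustrative sketch (added comment, not part of the driver):
 * intel_map_sg() is reached through dma_map_sg().  A hypothetical caller
 * with an already-initialised scatterlist might do:
 *
 *	int count = dma_map_sg(&pdev->dev, sglist, nelems, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	...use sg_dma_address()/sg_dma_len() of each mapped entry...
 *	dma_unmap_sg(&pdev->dev, sglist, nelems, DMA_FROM_DEVICE);
 *
 * On success all elements share one contiguous IOVA range sized by
 * aligned_nrpages() over every entry, as set up above.
 */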
3166
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003167static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3168{
3169 return !dma_addr;
3170}
3171
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003172struct dma_map_ops intel_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02003173 .alloc = intel_alloc_coherent,
3174 .free = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003175 .map_sg = intel_map_sg,
3176 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003177 .map_page = intel_map_page,
3178 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003179 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003180};
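
/*
 * Descriptive note (added comment): intel_iommu_init() below installs this
 * table as the default dma_ops ("dma_ops = &intel_dma_ops"), so once DMA
 * remapping is enabled the DMA API calls of ordinary drivers are serviced by
 * the handlers above unless a device carries its own per-device ops.
 */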
3181
3182static inline int iommu_domain_cache_init(void)
3183{
3184 int ret = 0;
3185
3186 iommu_domain_cache = kmem_cache_create("iommu_domain",
3187 sizeof(struct dmar_domain),
3188 0,
3189 SLAB_HWCACHE_ALIGN,
3191					 NULL);
3192 if (!iommu_domain_cache) {
3193 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3194 ret = -ENOMEM;
3195 }
3196
3197 return ret;
3198}
3199
3200static inline int iommu_devinfo_cache_init(void)
3201{
3202 int ret = 0;
3203
3204 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3205 sizeof(struct device_domain_info),
3206 0,
3207 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003208 NULL);
3209 if (!iommu_devinfo_cache) {
3210 printk(KERN_ERR "Couldn't create devinfo cache\n");
3211 ret = -ENOMEM;
3212 }
3213
3214 return ret;
3215}
3216
3217static inline int iommu_iova_cache_init(void)
3218{
3219 int ret = 0;
3220
3221 iommu_iova_cache = kmem_cache_create("iommu_iova",
3222 sizeof(struct iova),
3223 0,
3224 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003225 NULL);
3226 if (!iommu_iova_cache) {
3227 printk(KERN_ERR "Couldn't create iova cache\n");
3228 ret = -ENOMEM;
3229 }
3230
3231 return ret;
3232}
3233
3234static int __init iommu_init_mempool(void)
3235{
3236 int ret;
3237 ret = iommu_iova_cache_init();
3238 if (ret)
3239 return ret;
3240
3241 ret = iommu_domain_cache_init();
3242 if (ret)
3243 goto domain_error;
3244
3245 ret = iommu_devinfo_cache_init();
3246 if (!ret)
3247 return ret;
3248
3249 kmem_cache_destroy(iommu_domain_cache);
3250domain_error:
3251 kmem_cache_destroy(iommu_iova_cache);
3252
3253 return -ENOMEM;
3254}
3255
3256static void __init iommu_exit_mempool(void)
3257{
3258 kmem_cache_destroy(iommu_devinfo_cache);
3259 kmem_cache_destroy(iommu_domain_cache);
3260 kmem_cache_destroy(iommu_iova_cache);
3261
3262}
3263
Dan Williams556ab452010-07-23 15:47:56 -07003264static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3265{
3266 struct dmar_drhd_unit *drhd;
3267 u32 vtbar;
3268 int rc;
3269
3270 /* We know that this device on this chipset has its own IOMMU.
3271 * If we find it under a different IOMMU, then the BIOS is lying
3272 * to us. Hope that the IOMMU for this device is actually
3273 * disabled, and it needs no translation...
3274 */
3275 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3276 if (rc) {
3277 /* "can't" happen */
3278 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3279 return;
3280 }
3281 vtbar &= 0xffff0000;
3282
3283	/* we know that this iommu should be at offset 0xa000 from vtbar */
3284 drhd = dmar_find_matched_drhd_unit(pdev);
3285 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3286 TAINT_FIRMWARE_WORKAROUND,
3287 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3288 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3289}
3290DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3291
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003292static void __init init_no_remapping_devices(void)
3293{
3294 struct dmar_drhd_unit *drhd;
3295
3296 for_each_drhd_unit(drhd) {
3297 if (!drhd->include_all) {
3298 int i;
3299 for (i = 0; i < drhd->devices_cnt; i++)
3300 if (drhd->devices[i] != NULL)
3301 break;
3302 /* ignore DMAR unit if no pci devices exist */
3303 if (i == drhd->devices_cnt)
3304 drhd->ignored = 1;
3305 }
3306 }
3307
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003308 for_each_drhd_unit(drhd) {
3309 int i;
3310 if (drhd->ignored || drhd->include_all)
3311 continue;
3312
3313 for (i = 0; i < drhd->devices_cnt; i++)
3314 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003315 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003316 break;
3317
3318 if (i < drhd->devices_cnt)
3319 continue;
3320
David Woodhousec0771df2011-10-14 20:59:46 +01003321 /* This IOMMU has *only* gfx devices. Either bypass it or
3322 set the gfx_mapped flag, as appropriate */
3323 if (dmar_map_gfx) {
3324 intel_iommu_gfx_mapped = 1;
3325 } else {
3326 drhd->ignored = 1;
3327 for (i = 0; i < drhd->devices_cnt; i++) {
3328 if (!drhd->devices[i])
3329 continue;
3330 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3331 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003332 }
3333 }
3334}
3335
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003336#ifdef CONFIG_SUSPEND
3337static int init_iommu_hw(void)
3338{
3339 struct dmar_drhd_unit *drhd;
3340 struct intel_iommu *iommu = NULL;
3341
3342 for_each_active_iommu(iommu, drhd)
3343 if (iommu->qi)
3344 dmar_reenable_qi(iommu);
3345
Joseph Cihulab7792602011-05-03 00:08:37 -07003346 for_each_iommu(iommu, drhd) {
3347 if (drhd->ignored) {
3348 /*
3349 * we always have to disable PMRs or DMA may fail on
3350 * this device
3351 */
3352 if (force_on)
3353 iommu_disable_protect_mem_regions(iommu);
3354 continue;
3355 }
3356
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003357 iommu_flush_write_buffer(iommu);
3358
3359 iommu_set_root_entry(iommu);
3360
3361 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003362 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003363 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003364 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003365 if (iommu_enable_translation(iommu))
3366 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003367 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003368 }
3369
3370 return 0;
3371}
3372
3373static void iommu_flush_all(void)
3374{
3375 struct dmar_drhd_unit *drhd;
3376 struct intel_iommu *iommu;
3377
3378 for_each_active_iommu(iommu, drhd) {
3379 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003380 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003381 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003382 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003383 }
3384}
3385
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003386static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003387{
3388 struct dmar_drhd_unit *drhd;
3389 struct intel_iommu *iommu = NULL;
3390 unsigned long flag;
3391
3392 for_each_active_iommu(iommu, drhd) {
3393 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3394 GFP_ATOMIC);
3395 if (!iommu->iommu_state)
3396 goto nomem;
3397 }
3398
3399 iommu_flush_all();
3400
3401 for_each_active_iommu(iommu, drhd) {
3402 iommu_disable_translation(iommu);
3403
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003404 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003405
3406 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3407 readl(iommu->reg + DMAR_FECTL_REG);
3408 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3409 readl(iommu->reg + DMAR_FEDATA_REG);
3410 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3411 readl(iommu->reg + DMAR_FEADDR_REG);
3412 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3413 readl(iommu->reg + DMAR_FEUADDR_REG);
3414
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003415 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003416 }
3417 return 0;
3418
3419nomem:
3420 for_each_active_iommu(iommu, drhd)
3421 kfree(iommu->iommu_state);
3422
3423 return -ENOMEM;
3424}
3425
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003426static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003427{
3428 struct dmar_drhd_unit *drhd;
3429 struct intel_iommu *iommu = NULL;
3430 unsigned long flag;
3431
3432 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003433 if (force_on)
3434 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3435 else
3436 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003437 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003438 }
3439
3440 for_each_active_iommu(iommu, drhd) {
3441
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003442 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003443
3444 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3445 iommu->reg + DMAR_FECTL_REG);
3446 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3447 iommu->reg + DMAR_FEDATA_REG);
3448 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3449 iommu->reg + DMAR_FEADDR_REG);
3450 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3451 iommu->reg + DMAR_FEUADDR_REG);
3452
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003453 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003454 }
3455
3456 for_each_active_iommu(iommu, drhd)
3457 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003458}
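
/*
 * Descriptive note (added comment): across suspend/resume only the fault
 * event registers (FECTL/FEDATA/FEADDR/FEUADDR) are saved and restored by
 * hand.  The root table, context entries and page tables live in ordinary
 * memory and survive suspend-to-RAM, so init_iommu_hw() only has to re-point
 * the hardware at them, flush the caches and re-enable translation.
 */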
3459
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003460static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003461 .resume = iommu_resume,
3462 .suspend = iommu_suspend,
3463};
3464
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003465static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003466{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003467 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003468}
3469
3470#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003471static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003472#endif /* CONFIG_PM */
3473
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003474LIST_HEAD(dmar_rmrr_units);
3475
3476static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3477{
3478 list_add(&rmrr->list, &dmar_rmrr_units);
3479}
3480
3481
3482int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3483{
3484 struct acpi_dmar_reserved_memory *rmrr;
3485 struct dmar_rmrr_unit *rmrru;
3486
3487 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3488 if (!rmrru)
3489 return -ENOMEM;
3490
3491 rmrru->hdr = header;
3492 rmrr = (struct acpi_dmar_reserved_memory *)header;
3493 rmrru->base_address = rmrr->base_address;
3494 rmrru->end_address = rmrr->end_address;
3495
3496 dmar_register_rmrr_unit(rmrru);
3497 return 0;
3498}
3499
3500static int __init
3501rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3502{
3503 struct acpi_dmar_reserved_memory *rmrr;
3504 int ret;
3505
3506 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3507 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3508 ((void *)rmrr) + rmrr->header.length,
3509 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3510
3511 if (ret || (rmrru->devices_cnt == 0)) {
3512 list_del(&rmrru->list);
3513 kfree(rmrru);
3514 }
3515 return ret;
3516}
3517
3518static LIST_HEAD(dmar_atsr_units);
3519
3520int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3521{
3522 struct acpi_dmar_atsr *atsr;
3523 struct dmar_atsr_unit *atsru;
3524
3525 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3526 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3527 if (!atsru)
3528 return -ENOMEM;
3529
3530 atsru->hdr = hdr;
3531 atsru->include_all = atsr->flags & 0x1;
3532
3533 list_add(&atsru->list, &dmar_atsr_units);
3534
3535 return 0;
3536}
3537
3538static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3539{
3540 int rc;
3541 struct acpi_dmar_atsr *atsr;
3542
3543 if (atsru->include_all)
3544 return 0;
3545
3546 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3547 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3548 (void *)atsr + atsr->header.length,
3549 &atsru->devices_cnt, &atsru->devices,
3550 atsr->segment);
3551 if (rc || !atsru->devices_cnt) {
3552 list_del(&atsru->list);
3553 kfree(atsru);
3554 }
3555
3556 return rc;
3557}
3558
3559int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3560{
3561 int i;
3562 struct pci_bus *bus;
3563 struct acpi_dmar_atsr *atsr;
3564 struct dmar_atsr_unit *atsru;
3565
3566 dev = pci_physfn(dev);
3567
3568 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3569 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3570 if (atsr->segment == pci_domain_nr(dev->bus))
3571 goto found;
3572 }
3573
3574 return 0;
3575
3576found:
3577 for (bus = dev->bus; bus; bus = bus->parent) {
3578 struct pci_dev *bridge = bus->self;
3579
3580 if (!bridge || !pci_is_pcie(bridge) ||
3581 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3582 return 0;
3583
3584 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3585 for (i = 0; i < atsru->devices_cnt; i++)
3586 if (atsru->devices[i] == bridge)
3587 return 1;
3588 break;
3589 }
3590 }
3591
3592 if (atsru->include_all)
3593 return 1;
3594
3595 return 0;
3596}
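
/*
 * Descriptive note (added comment): the walk above climbs from the device
 * towards the root complex.  The device qualifies only if the PCIe root port
 * it sits under is listed in an ATSR structure for the same PCI segment, or
 * if that ATSR has its include_all (all-ports) flag set; finding a
 * PCIe-to-PCI bridge on the way up disqualifies it.
 */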
3597
Sergey Senozhatskyc8f369a2011-10-26 18:45:39 +03003598int __init dmar_parse_rmrr_atsr_dev(void)
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003599{
3600 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3601 struct dmar_atsr_unit *atsr, *atsr_n;
3602 int ret = 0;
3603
3604 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3605 ret = rmrr_parse_dev(rmrr);
3606 if (ret)
3607 return ret;
3608 }
3609
3610 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3611 ret = atsr_parse_dev(atsr);
3612 if (ret)
3613 return ret;
3614 }
3615
3616 return ret;
3617}
3618
Fenghua Yu99dcade2009-11-11 07:23:06 -08003619/*
3620 * Here we only respond to the action of a device being unbound from its driver.
3621 *
3622 * A newly added device is not attached to its DMAR domain here yet. That will
3623 * happen when the device is mapped to an iova.
3624 */
3625static int device_notifier(struct notifier_block *nb,
3626 unsigned long action, void *data)
3627{
3628 struct device *dev = data;
3629 struct pci_dev *pdev = to_pci_dev(dev);
3630 struct dmar_domain *domain;
3631
David Woodhouse44cd6132009-12-02 10:18:30 +00003632 if (iommu_no_mapping(dev))
3633 return 0;
3634
Fenghua Yu99dcade2009-11-11 07:23:06 -08003635 domain = find_domain(pdev);
3636 if (!domain)
3637 return 0;
3638
Alex Williamsona97590e2011-03-04 14:52:16 -07003639 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003640 domain_remove_one_dev_info(domain, pdev);
3641
Alex Williamsona97590e2011-03-04 14:52:16 -07003642 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3643 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3644 list_empty(&domain->devices))
3645 domain_exit(domain);
3646 }
3647
Fenghua Yu99dcade2009-11-11 07:23:06 -08003648 return 0;
3649}
3650
3651static struct notifier_block device_nb = {
3652 .notifier_call = device_notifier,
3653};
3654
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003655int __init intel_iommu_init(void)
3656{
3657 int ret = 0;
3658
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003659 /* VT-d is required for a TXT/tboot launch, so enforce that */
3660 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003661
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003662 if (dmar_table_init()) {
3663 if (force_on)
3664 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003665 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003666 }
3667
Suresh Siddhac2c72862011-08-23 17:05:19 -07003668 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003669 if (force_on)
3670 panic("tboot: Failed to initialize DMAR device scope\n");
3671 return -ENODEV;
3672 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003673
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003674 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003675 return -ENODEV;
3676
Joseph Cihula51a63e62011-03-21 11:04:24 -07003677 if (iommu_init_mempool()) {
3678 if (force_on)
3679 panic("tboot: Failed to initialize iommu memory\n");
3680 return -ENODEV;
3681 }
3682
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003683 if (list_empty(&dmar_rmrr_units))
3684 printk(KERN_INFO "DMAR: No RMRR found\n");
3685
3686 if (list_empty(&dmar_atsr_units))
3687 printk(KERN_INFO "DMAR: No ATSR found\n");
3688
Joseph Cihula51a63e62011-03-21 11:04:24 -07003689 if (dmar_init_reserved_ranges()) {
3690 if (force_on)
3691 panic("tboot: Failed to reserve iommu ranges\n");
3692 return -ENODEV;
3693 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003694
3695 init_no_remapping_devices();
3696
Joseph Cihulab7792602011-05-03 00:08:37 -07003697 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003698 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003699 if (force_on)
3700 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003701 printk(KERN_ERR "IOMMU: dmar init failed\n");
3702 put_iova_domain(&reserved_iova_list);
3703 iommu_exit_mempool();
3704 return ret;
3705 }
3706 printk(KERN_INFO
3707 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3708
mark gross5e0d2a62008-03-04 15:22:08 -08003709 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003710#ifdef CONFIG_SWIOTLB
3711 swiotlb = 0;
3712#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003713 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003714
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003715 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003716
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003717 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003718
Fenghua Yu99dcade2009-11-11 07:23:06 -08003719 bus_register_notifier(&pci_bus_type, &device_nb);
3720
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003721 intel_iommu_enabled = 1;
3722
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003723 return 0;
3724}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003725
Han, Weidong3199aa62009-02-26 17:31:12 +08003726static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3727 struct pci_dev *pdev)
3728{
3729 struct pci_dev *tmp, *parent;
3730
3731 if (!iommu || !pdev)
3732 return;
3733
3734 /* dependent device detach */
3735 tmp = pci_find_upstream_pcie_bridge(pdev);
3736 /* Secondary interface's bus number and devfn 0 */
3737 if (tmp) {
3738 parent = pdev->bus->self;
3739 while (parent != tmp) {
3740 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf92009-04-04 01:45:37 +01003741 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003742 parent = parent->bus->self;
3743 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003744 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003745 iommu_detach_dev(iommu,
3746 tmp->subordinate->number, 0);
3747 else /* this is a legacy PCI bridge */
David Woodhouse276dbf92009-04-04 01:45:37 +01003748 iommu_detach_dev(iommu, tmp->bus->number,
3749 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003750 }
3751}
3752
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003753static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003754 struct pci_dev *pdev)
3755{
3756 struct device_domain_info *info;
3757 struct intel_iommu *iommu;
3758 unsigned long flags;
3759 int found = 0;
3760 struct list_head *entry, *tmp;
3761
David Woodhouse276dbf92009-04-04 01:45:37 +01003762 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3763 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003764 if (!iommu)
3765 return;
3766
3767 spin_lock_irqsave(&device_domain_lock, flags);
3768 list_for_each_safe(entry, tmp, &domain->devices) {
3769 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003770 if (info->segment == pci_domain_nr(pdev->bus) &&
3771 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003772 info->devfn == pdev->devfn) {
3773 list_del(&info->link);
3774 list_del(&info->global);
3775 if (info->dev)
3776 info->dev->dev.archdata.iommu = NULL;
3777 spin_unlock_irqrestore(&device_domain_lock, flags);
3778
Yu Zhao93a23a72009-05-18 13:51:37 +08003779 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003780 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003781 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003782 free_devinfo_mem(info);
3783
3784 spin_lock_irqsave(&device_domain_lock, flags);
3785
3786 if (found)
3787 break;
3788 else
3789 continue;
3790 }
3791
3792		/* if there are no other devices under the same iommu
3793		 * owned by this domain, clear this iommu in iommu_bmp,
3794		 * and update the iommu count and coherency
3795 */
David Woodhouse276dbf92009-04-04 01:45:37 +01003796 if (iommu == device_to_iommu(info->segment, info->bus,
3797 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003798 found = 1;
3799 }
3800
Roland Dreier3e7abe22011-07-20 06:22:21 -07003801 spin_unlock_irqrestore(&device_domain_lock, flags);
3802
Weidong Hanc7151a82008-12-08 22:51:37 +08003803 if (found == 0) {
3804 unsigned long tmp_flags;
3805 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
Mike Travis1b198bb2012-03-05 15:05:16 -08003806 clear_bit(iommu->seq_id, domain->iommu_bmp);
Weidong Hanc7151a82008-12-08 22:51:37 +08003807 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003808 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003809 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003810
Alex Williamson9b4554b2011-05-24 12:19:04 -04003811 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3812 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3813 spin_lock_irqsave(&iommu->lock, tmp_flags);
3814 clear_bit(domain->id, iommu->domain_ids);
3815 iommu->domains[domain->id] = NULL;
3816 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3817 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003818 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003819}
3820
3821static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3822{
3823 struct device_domain_info *info;
3824 struct intel_iommu *iommu;
3825 unsigned long flags1, flags2;
3826
3827 spin_lock_irqsave(&device_domain_lock, flags1);
3828 while (!list_empty(&domain->devices)) {
3829 info = list_entry(domain->devices.next,
3830 struct device_domain_info, link);
3831 list_del(&info->link);
3832 list_del(&info->global);
3833 if (info->dev)
3834 info->dev->dev.archdata.iommu = NULL;
3835
3836 spin_unlock_irqrestore(&device_domain_lock, flags1);
3837
Yu Zhao93a23a72009-05-18 13:51:37 +08003838 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf92009-04-04 01:45:37 +01003839 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003840 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003841 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003842
3843 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003844 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003845 */
3846 spin_lock_irqsave(&domain->iommu_lock, flags2);
3847 if (test_and_clear_bit(iommu->seq_id,
Mike Travis1b198bb2012-03-05 15:05:16 -08003848 domain->iommu_bmp)) {
Weidong Hanc7151a82008-12-08 22:51:37 +08003849 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003850 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003851 }
3852 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3853
3854 free_devinfo_mem(info);
3855 spin_lock_irqsave(&device_domain_lock, flags1);
3856 }
3857 spin_unlock_irqrestore(&device_domain_lock, flags1);
3858}
3859
Weidong Han5e98c4b2008-12-08 23:03:27 +08003860/* domain id for virtual machines; it won't be set in a context entry */
3861static unsigned long vm_domid;
3862
3863static struct dmar_domain *iommu_alloc_vm_domain(void)
3864{
3865 struct dmar_domain *domain;
3866
3867 domain = alloc_domain_mem();
3868 if (!domain)
3869 return NULL;
3870
3871 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003872 domain->nid = -1;
Mike Travis1b198bb2012-03-05 15:05:16 -08003873 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003874 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3875
3876 return domain;
3877}
3878
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003879static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003880{
3881 int adjust_width;
3882
3883 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003884 spin_lock_init(&domain->iommu_lock);
3885
3886 domain_reserve_special_ranges(domain);
3887
3888 /* calculate AGAW */
3889 domain->gaw = guest_width;
3890 adjust_width = guestwidth_to_adjustwidth(guest_width);
3891 domain->agaw = width_to_agaw(adjust_width);
3892
3893 INIT_LIST_HEAD(&domain->devices);
3894
3895 domain->iommu_count = 0;
3896 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003897 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003898 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003899 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003900 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003901
3902 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003903 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003904 if (!domain->pgd)
3905 return -ENOMEM;
3906 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3907 return 0;
3908}
3909
3910static void iommu_free_vm_domain(struct dmar_domain *domain)
3911{
3912 unsigned long flags;
3913 struct dmar_drhd_unit *drhd;
3914 struct intel_iommu *iommu;
3915 unsigned long i;
3916 unsigned long ndomains;
3917
3918 for_each_drhd_unit(drhd) {
3919 if (drhd->ignored)
3920 continue;
3921 iommu = drhd->iommu;
3922
3923 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003924 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003925 if (iommu->domains[i] == domain) {
3926 spin_lock_irqsave(&iommu->lock, flags);
3927 clear_bit(i, iommu->domain_ids);
3928 iommu->domains[i] = NULL;
3929 spin_unlock_irqrestore(&iommu->lock, flags);
3930 break;
3931 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003932 }
3933 }
3934}
3935
3936static void vm_domain_exit(struct dmar_domain *domain)
3937{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003938	/* Domain 0 is reserved, so don't process it */
3939 if (!domain)
3940 return;
3941
3942 vm_domain_remove_all_dev_info(domain);
3943 /* destroy iovas */
3944 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003945
3946 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003947 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003948
3949 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003950 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003951
3952 iommu_free_vm_domain(domain);
3953 free_domain_mem(domain);
3954}
3955
Joerg Roedel5d450802008-12-03 14:52:32 +01003956static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003957{
Joerg Roedel5d450802008-12-03 14:52:32 +01003958 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003959
Joerg Roedel5d450802008-12-03 14:52:32 +01003960 dmar_domain = iommu_alloc_vm_domain();
3961 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003962 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003963 "intel_iommu_domain_init: dmar_domain == NULL\n");
3964 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003965 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003966 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003967 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003968 "intel_iommu_domain_init() failed\n");
3969 vm_domain_exit(dmar_domain);
3970 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003971 }
Allen Kay8140a952011-10-14 12:32:17 -07003972 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003973 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003974
Joerg Roedel5d450802008-12-03 14:52:32 +01003975 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003976}
Kay, Allen M38717942008-09-09 18:37:29 +03003977
Joerg Roedel5d450802008-12-03 14:52:32 +01003978static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003979{
Joerg Roedel5d450802008-12-03 14:52:32 +01003980 struct dmar_domain *dmar_domain = domain->priv;
3981
3982 domain->priv = NULL;
3983 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003984}
Kay, Allen M38717942008-09-09 18:37:29 +03003985
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003986static int intel_iommu_attach_device(struct iommu_domain *domain,
3987 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003988{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003989 struct dmar_domain *dmar_domain = domain->priv;
3990 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003991 struct intel_iommu *iommu;
3992 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003993
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003994 /* normally pdev is not mapped */
3995 if (unlikely(domain_context_mapped(pdev))) {
3996 struct dmar_domain *old_domain;
3997
3998 old_domain = find_domain(pdev);
3999 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004000 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4001 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
4002 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004003 else
4004 domain_remove_dev_info(old_domain);
4005 }
4006 }
4007
David Woodhouse276dbf92009-04-04 01:45:37 +01004008 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
4009 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004010 if (!iommu)
4011 return -ENODEV;
4012
4013 /* check if this iommu agaw is sufficient for max mapped address */
4014 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01004015 if (addr_width > cap_mgaw(iommu->cap))
4016 addr_width = cap_mgaw(iommu->cap);
4017
4018 if (dmar_domain->max_addr > (1LL << addr_width)) {
4019 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004020 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01004021 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004022 return -EFAULT;
4023 }
Tom Lyona99c47a2010-05-17 08:20:45 +01004024 dmar_domain->gaw = addr_width;
4025
4026 /*
4027 * Knock out extra levels of page tables if necessary
4028 */
4029 while (iommu->agaw < dmar_domain->agaw) {
4030 struct dma_pte *pte;
4031
4032 pte = dmar_domain->pgd;
4033 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08004034 dmar_domain->pgd = (struct dma_pte *)
4035 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01004036 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01004037 }
4038 dmar_domain->agaw--;
4039 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004040
David Woodhouse5fe60f42009-08-09 10:53:41 +01004041 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004042}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004043
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004044static void intel_iommu_detach_device(struct iommu_domain *domain,
4045 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03004046{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01004047 struct dmar_domain *dmar_domain = domain->priv;
4048 struct pci_dev *pdev = to_pci_dev(dev);
4049
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07004050 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03004051}
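
/*
 * A minimal caller-side sketch, not part of this driver: the attach and
 * detach callbacks above are only reached through the generic IOMMU API,
 * roughly as below.  The helper name and error handling are illustrative,
 * and the sketch relies on the <linux/iommu.h> and <linux/pci.h> headers
 * already included at the top of this file.
 */
static int __maybe_unused example_attach_detach(struct pci_dev *pdev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&pci_bus_type);	/* -> intel_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, &pdev->dev);	/* -> intel_iommu_attach_device() */
	if (ret) {
		iommu_domain_free(domain);		/* -> intel_iommu_domain_destroy() */
		return ret;
	}

	/* ... iommu_map()/iommu_unmap() against 'domain' ... */

	iommu_detach_device(domain, &pdev->dev);	/* -> intel_iommu_detach_device() */
	iommu_domain_free(domain);
	return 0;
}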
Kay, Allen M38717942008-09-09 18:37:29 +03004052
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004053static int intel_iommu_map(struct iommu_domain *domain,
4054 unsigned long iova, phys_addr_t hpa,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004055 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03004056{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004057 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004058 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004059 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004060 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004061
Joerg Roedeldde57a22008-12-03 15:04:09 +01004062 if (iommu_prot & IOMMU_READ)
4063 prot |= DMA_PTE_READ;
4064 if (iommu_prot & IOMMU_WRITE)
4065 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08004066 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4067 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004068
David Woodhouse163cc522009-06-28 00:51:17 +01004069 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01004070 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004071 u64 end;
4072
4073 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004074 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004075 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004076 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004077 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004078 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004079 return -EFAULT;
4080 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004081 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004082 }
David Woodhousead051222009-06-28 14:22:28 +01004083	/* Convert size into a count of VT-d pages, rounding up if the low
 4084	   bits of hpa plus the size would spill onto a further page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004085 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004086 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4087 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004088 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004089}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004090
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004091static size_t intel_iommu_unmap(struct iommu_domain *domain,
4092 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004093{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004094 struct dmar_domain *dmar_domain = domain->priv;
Allen Kay292827c2011-10-14 12:31:54 -07004095 int order;
Sheng Yang4b99d352009-07-08 11:52:52 +01004096
Allen Kay292827c2011-10-14 12:31:54 -07004097 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
David Woodhouse163cc522009-06-28 00:51:17 +01004098 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004099
David Woodhouse163cc522009-06-28 00:51:17 +01004100 if (dmar_domain->max_addr == iova + size)
4101 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004102
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02004103 return PAGE_SIZE << order;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004104}
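
/*
 * A sketch, not used by the driver, of the byte-oriented contract the IOMMU
 * core expects from the map/unmap callbacks above: iova, paddr and size are
 * in bytes, must be aligned to the smallest page size advertised in
 * pgsize_bitmap, and iommu_unmap() reports how many bytes were actually
 * unmapped.  The helper name is illustrative only.
 */
static int __maybe_unused example_map_unmap(struct iommu_domain *domain,
					    unsigned long iova,
					    phys_addr_t paddr)
{
	size_t unmapped;
	int ret;

	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	unmapped = iommu_unmap(domain, iova, PAGE_SIZE);
	return unmapped == PAGE_SIZE ? 0 : -EIO;
}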
Kay, Allen M38717942008-09-09 18:37:29 +03004105
Joerg Roedeld14d6572008-12-03 15:06:57 +01004106static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4107 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004108{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004109 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004110 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004111 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004112
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004113 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
Kay, Allen M38717942008-09-09 18:37:29 +03004114 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004115 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004116
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004117 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004118}
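
/*
 * Illustrative check, not driver code, of what the iova_to_phys callback
 * above reports.  The value comes straight from the PTE, so for a regular
 * 4KiB mapping it is the page-aligned physical address backing iova (and 0
 * if nothing is mapped).  The helper name and the 4KiB-mapping assumption
 * are illustrative only.
 */
static bool __maybe_unused example_translation_matches(struct iommu_domain *domain,
						       unsigned long iova,
						       phys_addr_t expected)
{
	phys_addr_t phys = iommu_iova_to_phys(domain, iova);

	return phys == (expected & VTD_PAGE_MASK);
}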
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004119
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004120static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4121 unsigned long cap)
4122{
4123 struct dmar_domain *dmar_domain = domain->priv;
4124
4125 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4126 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004127 if (cap == IOMMU_CAP_INTR_REMAP)
4128 return intr_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004129
4130 return 0;
4131}
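
/*
 * Sketch of how a consumer might use the capability callback above through
 * the generic API (KVM device assignment does something similar): only ask
 * for snooped, cache-coherent mappings when every IOMMU backing the domain
 * can honour DMA_PTE_SNP.  The helper name is illustrative only.
 */
static int __maybe_unused example_map_coherent(struct iommu_domain *domain,
					       unsigned long iova,
					       phys_addr_t paddr, size_t size)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;

	return iommu_map(domain, iova, paddr, size, prot);
}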
4132
Alex Williamson70ae6f02011-10-21 15:56:11 -04004133/*
 4134 * Group numbers are arbitrary.  Devices with the same group number
 4135 * indicate that the iommu cannot differentiate between them.  To avoid
 4136 * tracking used groups we just use the seg|bus|devfn of the lowest
 4137 * level at which we're able to differentiate devices.
4138 */
4139static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
4140{
4141 struct pci_dev *pdev = to_pci_dev(dev);
4142 struct pci_dev *bridge;
4143 union {
4144 struct {
4145 u8 devfn;
4146 u8 bus;
4147 u16 segment;
4148 } pci;
4149 u32 group;
4150 } id;
4151
4152 if (iommu_no_mapping(dev))
4153 return -ENODEV;
4154
4155 id.pci.segment = pci_domain_nr(pdev->bus);
4156 id.pci.bus = pdev->bus->number;
4157 id.pci.devfn = pdev->devfn;
4158
4159 if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
4160 return -ENODEV;
4161
4162 bridge = pci_find_upstream_pcie_bridge(pdev);
4163 if (bridge) {
4164 if (pci_is_pcie(bridge)) {
4165 id.pci.bus = bridge->subordinate->number;
4166 id.pci.devfn = 0;
4167 } else {
4168 id.pci.bus = bridge->bus->number;
4169 id.pci.devfn = bridge->devfn;
4170 }
4171 }
4172
Alex Williamsonbcb71ab2011-10-21 15:56:24 -04004173 if (!pdev->is_virtfn && iommu_group_mf)
4174 id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
4175
Alex Williamson70ae6f02011-10-21 15:56:11 -04004176 *groupid = id.group;
4177
4178 return 0;
4179}
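
/*
 * Illustrative use of the group callback above via the generic
 * iommu_device_group() helper (not part of this driver): devices that
 * report the same group id cannot be isolated from one another by the
 * IOMMU, so they have to be assigned to the same domain.  The helper name
 * is an assumption.
 */
static bool __maybe_unused example_same_group(struct device *a, struct device *b)
{
	unsigned int ga, gb;

	if (iommu_device_group(a, &ga) || iommu_device_group(b, &gb))
		return false;	/* at least one device is not translated */

	return ga == gb;
}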
4180
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004181static struct iommu_ops intel_iommu_ops = {
4182 .domain_init = intel_iommu_domain_init,
4183 .domain_destroy = intel_iommu_domain_destroy,
4184 .attach_dev = intel_iommu_attach_device,
4185 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004186 .map = intel_iommu_map,
4187 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004188 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004189 .domain_has_cap = intel_iommu_domain_has_cap,
Alex Williamson70ae6f02011-10-21 15:56:11 -04004190 .device_group = intel_iommu_device_group,
Ohad Ben-Cohen6d1c56a2011-11-10 11:32:30 +02004191 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004192};
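
/*
 * For reference, a sketch of how an ops table like the one above is handed
 * to the IOMMU core in this era of the API; the real call site sits in the
 * driver's init path earlier in this file:
 *
 *	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 *
 * After that, iommu_domain_alloc(&pci_bus_type) and friends dispatch into
 * the callbacks wired up above.
 */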
David Woodhouse9af88142009-02-13 23:18:03 +00004193
Daniel Vetter4b56ffb2013-01-20 23:50:13 +01004194static void __devinit quirk_iommu_g4x_gfx(struct pci_dev *dev)
4195{
4196 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4197 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4198 dmar_map_gfx = 0;
4199}
4200
4201DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4202DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4203DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4204DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4205DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4206DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4207DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4208
David Woodhouse9af88142009-02-13 23:18:03 +00004209static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
4210{
4211 /*
4212 * Mobile 4 Series Chipset neglects to set RWBF capability,
Daniel Vetterdf87bd32013-01-21 19:48:59 +01004213 * but needs it. Same seems to hold for the desktop versions.
David Woodhouse9af88142009-02-13 23:18:03 +00004214 */
4215 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4216 rwbf_quirk = 1;
4217}
4218
4219DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
Daniel Vetterdf87bd32013-01-21 19:48:59 +01004220DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4221DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4222DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4223DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4224DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4225DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004226
Adam Jacksoneecfd572010-08-25 21:17:34 +01004227#define GGC 0x52
4228#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4229#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4230#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4231#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4232#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4233#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4234#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4235#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4236
David Woodhouse9eecabc2010-09-21 22:28:23 +01004237static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4238{
4239 unsigned short ggc;
4240
Adam Jacksoneecfd572010-08-25 21:17:34 +01004241 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004242 return;
4243
Adam Jacksoneecfd572010-08-25 21:17:34 +01004244 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004245 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4246 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004247 } else if (dmar_map_gfx) {
4248 /* we have to ensure the gfx device is idle before we flush */
4249 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4250 intel_iommu_strict = 1;
4251 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004252}
4253DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4254DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4255DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4256DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4257
David Woodhousee0fc7e02009-09-30 09:12:17 -07004258/* On Tylersburg chipsets, some BIOSes have been known to enable the
4259 ISOCH DMAR unit for the Azalia sound device, but not give it any
4260 TLB entries, which causes it to deadlock. Check for that. We do
4261 this in a function called from init_dmars(), instead of in a PCI
4262 quirk, because we don't want to print the obnoxious "BIOS broken"
4263 message if VT-d is actually disabled.
4264*/
4265static void __init check_tylersburg_isoch(void)
4266{
4267 struct pci_dev *pdev;
4268 uint32_t vtisochctrl;
4269
4270 /* If there's no Azalia in the system anyway, forget it. */
4271 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4272 if (!pdev)
4273 return;
4274 pci_dev_put(pdev);
4275
4276 /* System Management Registers. Might be hidden, in which case
4277 we can't do the sanity check. But that's OK, because the
4278 known-broken BIOSes _don't_ actually hide it, so far. */
4279 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4280 if (!pdev)
4281 return;
4282
4283 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4284 pci_dev_put(pdev);
4285 return;
4286 }
4287
4288 pci_dev_put(pdev);
4289
4290 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4291 if (vtisochctrl & 1)
4292 return;
4293
4294 /* Drop all bits other than the number of TLB entries */
4295 vtisochctrl &= 0x1c;
4296
4297 /* If we have the recommended number of TLB entries (16), fine. */
4298 if (vtisochctrl == 0x10)
4299 return;
4300
4301 /* Zero TLB entries? You get to ride the short bus to school. */
4302 if (!vtisochctrl) {
4303 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4304 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4305 dmi_get_system_info(DMI_BIOS_VENDOR),
4306 dmi_get_system_info(DMI_BIOS_VERSION),
4307 dmi_get_system_info(DMI_PRODUCT_VERSION));
4308 iommu_identity_mapping |= IDENTMAP_AZALIA;
4309 return;
4310 }
4311
4312 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4313 vtisochctrl);
4314}