/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			    ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

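/*
 * Worked example (illustrative, not part of the original source): for a
 * 48-bit guest address width, __DOMAIN_MAX_PFN(48) == 2^36 - 1.  That
 * fits in a 64-bit unsigned long unchanged; on a 32-bit kernel,
 * DOMAIN_MAX_PFN(48) is clamped to ULONG_MAX (2^32 - 1).
 */
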
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

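/*
 * Illustrative arithmetic (derived from the helpers above): with
 * LEVEL_STRIDE == 9, a 48-bit address width gives
 * width_to_agaw(48) == (48 - 30) / 9 == 2, and agaw_to_level(2) == 4,
 * i.e. a four-level page table; a 39-bit width gives agaw 1 and a
 * three-level table.
 */
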
static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

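/*
 * Example (illustrative): lvl_to_nr_pages(2) == 512, i.e. one level-2
 * (2MiB) superpage entry covers 512 4KiB VT-d pages.
 */
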
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

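/*
 * Note (illustrative): on x86 with 4KiB pages, PAGE_SHIFT equals
 * VTD_PAGE_SHIFT (both 12), so these conversions are identity; the
 * shifts only matter on configurations where MM pages are larger than
 * the 4KiB VT-d page.
 */
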
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled
 * successfully (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine: more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

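/*
 * Usage note (illustrative, derived from the parser above): options are
 * comma-separated on the kernel command line, e.g. "intel_iommu=on,strict"
 * enables translation and forces synchronous IOTLB flushing, while
 * "intel_iommu=on,sp_off" enables translation but disables superpage use.
 */
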
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

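/*
 * Example (illustrative): if the SAGAW capability field reports only
 * 4-level support (bit 2 set, sagaw == 0x4), then counting down from
 * width_to_agaw(64) == 3, the first set bit found is agaw == 2, i.e. a
 * 48-bit, four-level address space.
 */
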
/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use the default agaw and
 * fall back to a smaller supported agaw on iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns the single iommu used by a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

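/*
 * Example (illustrative): if one iommu supports 2MiB and 1GiB pages
 * (mask 0x3) and another supports only 2MiB (mask 0x1), the common mask
 * is 0x1 and fls(0x1) == 1, so the domain is limited to 2MiB superpages.
 */
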
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

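/*
 * Walk sketch (illustrative): with agaw == 2 the table has four levels,
 * and a pfn's index bits are [35:27], [26:18], [17:9] and [8:0] for
 * levels 4 down to 1.  Passing target_level == 1 returns the 4KiB leaf
 * PTE, allocating intermediate tables on the way down; target_level == 0
 * stops at whatever mapping (or hole) is found first.
 */
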
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

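/*
 * Note (illustrative): the returned order reflects the size of the last
 * PTE cleared; for ordinary 4KiB leaves large_page stays 1 and the order
 * is 0, while clearing a 2MiB superpage leaf (large_page == 2) yields
 * order 9 (512 pages).
 */
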
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page size to be 2 ^ x, with the base address
	 * naturally aligned to that size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

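/*
 * Example (illustrative): flushing 9 pages rounds up to 16, so
 * mask == ilog2(16) == 4 and the PSI covers a 16-page (64KiB) naturally
 * aligned region; if 4 exceeded the IOMMU's maximum address mask value,
 * the code above would fall back to a DSI flush instead.
 */
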
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

1371static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001372static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001373
Joseph Cihula51a63e62011-03-21 11:04:24 -07001374static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001375{
1376 struct pci_dev *pdev = NULL;
1377 struct iova *iova;
1378 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001379
David Millerf6611972008-02-06 01:36:23 -08001380 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001381
Mark Gross8a443df2008-03-04 14:59:31 -08001382 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1383 &reserved_rbtree_key);
1384
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001385 /* IOAPIC ranges shouldn't be accessed by DMA */
1386 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1387 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001388 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001389 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001390 return -ENODEV;
1391 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001392
1393 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1394 for_each_pci_dev(pdev) {
1395 struct resource *r;
1396
1397 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1398 r = &pdev->resource[i];
1399 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1400 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001401 iova = reserve_iova(&reserved_iova_list,
1402 IOVA_PFN(r->start),
1403 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001404 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001405 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001406 return -ENODEV;
1407 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001408 }
1409 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001410 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411}
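/*
 * E.g. the IOAPIC reservation above covers IOVA pfns 0xfee00-0xfeeff,
 * so the allocator can never hand out DMA addresses in the 0xfee00000
 * range that the CPU decodes as interrupt messages.
 */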
1412
1413static void domain_reserve_special_ranges(struct dmar_domain *domain)
1414{
1415 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1416}
1417
1418static inline int guestwidth_to_adjustwidth(int gaw)
1419{
1420 int agaw;
1421 int r = (gaw - 12) % 9;
1422
1423 if (r == 0)
1424 agaw = gaw;
1425 else
1426 agaw = gaw + 9 - r;
1427 if (agaw > 64)
1428 agaw = 64;
1429 return agaw;
1430}
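/*
 * Worked example: each page-table level resolves 9 address bits above
 * the 12-bit page offset, so the adjusted width must satisfy
 * (agaw - 12) % 9 == 0.  gaw == 48 gives r == 0 and is kept as-is;
 * gaw == 40 gives r == 1 and is rounded up to 40 + 9 - 1 == 48;
 * anything that would exceed 64 is clamped to 64.
 */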
1431
1432static int domain_init(struct dmar_domain *domain, int guest_width)
1433{
1434 struct intel_iommu *iommu;
1435 int adjust_width, agaw;
1436 unsigned long sagaw;
1437
David Millerf6611972008-02-06 01:36:23 -08001438 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001439 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440
1441 domain_reserve_special_ranges(domain);
1442
1443 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001444 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001445 if (guest_width > cap_mgaw(iommu->cap))
1446 guest_width = cap_mgaw(iommu->cap);
1447 domain->gaw = guest_width;
1448 adjust_width = guestwidth_to_adjustwidth(guest_width);
1449 agaw = width_to_agaw(adjust_width);
1450 sagaw = cap_sagaw(iommu->cap);
1451 if (!test_bit(agaw, &sagaw)) {
1452 /* hardware doesn't support it, choose a bigger one */
1453 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1454 agaw = find_next_bit(&sagaw, 5, agaw);
1455 if (agaw >= 5)
1456 return -ENODEV;
1457 }
1458 domain->agaw = agaw;
1459 INIT_LIST_HEAD(&domain->devices);
1460
Weidong Han8e604092008-12-08 15:49:06 +08001461 if (ecap_coherent(iommu->ecap))
1462 domain->iommu_coherency = 1;
1463 else
1464 domain->iommu_coherency = 0;
1465
Sheng Yang58c610b2009-03-18 15:33:05 +08001466 if (ecap_sc_support(iommu->ecap))
1467 domain->iommu_snooping = 1;
1468 else
1469 domain->iommu_snooping = 0;
1470
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001471 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001472 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001473 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001474
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001475 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001476 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001477 if (!domain->pgd)
1478 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001479 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001480 return 0;
1481}
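/*
 * E.g. guest_width == 48 is already stride-aligned, so adjust_width
 * == 48 and, assuming width_to_agaw()'s (width - 30) / 9 encoding,
 * agaw == 2, i.e. a 4-level table; if cap_sagaw() lacks that bit, the
 * next larger supported AGAW is chosen instead.
 */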
1482
1483static void domain_exit(struct dmar_domain *domain)
1484{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001485 struct dmar_drhd_unit *drhd;
1486 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001487
1488	/* Domain 0 is reserved, so don't process it */
1489 if (!domain)
1490 return;
1491
Alex Williamson7b668352011-05-24 12:02:41 +01001492 /* Flush any lazy unmaps that may reference this domain */
1493 if (!intel_iommu_strict)
1494 flush_unmaps_timeout(0);
1495
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001496 domain_remove_dev_info(domain);
1497 /* destroy iovas */
1498 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001499
1500 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001501 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001502
1503 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001504 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001505
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001506 for_each_active_iommu(iommu, drhd)
1507 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1508 iommu_detach_domain(domain, iommu);
1509
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001510 free_domain_mem(domain);
1511}
1512
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001513static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1514 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001515{
1516 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001517 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001518 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001519 struct dma_pte *pgd;
1520 unsigned long num;
1521 unsigned long ndomains;
1522 int id;
1523 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001524 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001525
1526 pr_debug("Set context mapping for %02x:%02x.%d\n",
1527 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001528
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001529 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001530 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1531 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001532
David Woodhouse276dbf92009-04-04 01:45:37 +01001533 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001534 if (!iommu)
1535 return -ENODEV;
1536
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001537 context = device_to_context_entry(iommu, bus, devfn);
1538 if (!context)
1539 return -ENOMEM;
1540 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001541 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542 spin_unlock_irqrestore(&iommu->lock, flags);
1543 return 0;
1544 }
1545
Weidong Hanea6606b2008-12-08 23:08:15 +08001546 id = domain->id;
1547 pgd = domain->pgd;
1548
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001549 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1550 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001551 int found = 0;
1552
1553 /* find an available domain id for this device in iommu */
1554 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001555 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001556 if (iommu->domains[num] == domain) {
1557 id = num;
1558 found = 1;
1559 break;
1560 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001561 }
1562
1563 if (found == 0) {
1564 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1565 if (num >= ndomains) {
1566 spin_unlock_irqrestore(&iommu->lock, flags);
1567 printk(KERN_ERR "IOMMU: no free domain ids\n");
1568 return -EFAULT;
1569 }
1570
1571 set_bit(num, iommu->domain_ids);
1572 iommu->domains[num] = domain;
1573 id = num;
1574 }
1575
1576 /* Skip top levels of page tables for
1577			 * an iommu whose agaw is smaller than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001578 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001579 */
Chris Wright1672af12009-12-02 12:06:34 -08001580 if (translation != CONTEXT_TT_PASS_THROUGH) {
1581 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1582 pgd = phys_to_virt(dma_pte_addr(pgd));
1583 if (!dma_pte_present(pgd)) {
1584 spin_unlock_irqrestore(&iommu->lock, flags);
1585 return -ENOMEM;
1586 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001587 }
1588 }
1589 }
1590
1591 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001592
Yu Zhao93a23a72009-05-18 13:51:37 +08001593 if (translation != CONTEXT_TT_PASS_THROUGH) {
1594 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1595 translation = info ? CONTEXT_TT_DEV_IOTLB :
1596 CONTEXT_TT_MULTI_LEVEL;
1597 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001598 /*
1599 * In pass through mode, AW must be programmed to indicate the largest
1600	 * AGAW value supported by hardware, and ASR is ignored by hardware.
1601 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001602 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001603 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001604 else {
1605 context_set_address_root(context, virt_to_phys(pgd));
1606 context_set_address_width(context, iommu->agaw);
1607 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001608
1609 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001610 context_set_fault_enable(context);
1611 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001612 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001613
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001614 /*
1615 * It's a non-present to present mapping. If hardware doesn't cache
1616	 * non-present entries we only need to flush the write-buffer. If it
1617	 * _does_ cache non-present entries, then it does so in the special
1618 * domain #0, which we have to flush:
1619 */
1620 if (cap_caching_mode(iommu->cap)) {
1621 iommu->flush.flush_context(iommu, 0,
1622 (((u16)bus) << 8) | devfn,
1623 DMA_CCMD_MASK_NOBIT,
1624 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001625 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001626 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001627 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001628 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001629 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001630 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001631
1632 spin_lock_irqsave(&domain->iommu_lock, flags);
1633 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1634 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001635 if (domain->iommu_count == 1)
1636 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001637 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001638 }
1639 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640 return 0;
1641}
1642
1643static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001644domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1645 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001646{
1647 int ret;
1648 struct pci_dev *tmp, *parent;
1649
David Woodhouse276dbf92009-04-04 01:45:37 +01001650 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001651 pdev->bus->number, pdev->devfn,
1652 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001653 if (ret)
1654 return ret;
1655
1656 /* dependent device mapping */
1657 tmp = pci_find_upstream_pcie_bridge(pdev);
1658 if (!tmp)
1659 return 0;
1660 /* Secondary interface's bus number and devfn 0 */
1661 parent = pdev->bus->self;
1662 while (parent != tmp) {
David Woodhouse276dbf92009-04-04 01:45:37 +01001663 ret = domain_context_mapping_one(domain,
1664 pci_domain_nr(parent->bus),
1665 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001666 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667 if (ret)
1668 return ret;
1669 parent = parent->bus->self;
1670 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001671 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001672 return domain_context_mapping_one(domain,
David Woodhouse276dbf92009-04-04 01:45:37 +01001673 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001674 tmp->subordinate->number, 0,
1675 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001676 else /* this is a legacy PCI bridge */
1677 return domain_context_mapping_one(domain,
David Woodhouse276dbf92009-04-04 01:45:37 +01001678 pci_domain_nr(tmp->bus),
1679 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001680 tmp->devfn,
1681 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001682}
1683
Weidong Han5331fe62008-12-08 23:00:00 +08001684static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685{
1686 int ret;
1687 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001688 struct intel_iommu *iommu;
1689
David Woodhouse276dbf92009-04-04 01:45:37 +01001690 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1691 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001692 if (!iommu)
1693 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001694
David Woodhouse276dbf92009-04-04 01:45:37 +01001695 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001696 if (!ret)
1697 return ret;
1698 /* dependent device mapping */
1699 tmp = pci_find_upstream_pcie_bridge(pdev);
1700 if (!tmp)
1701 return ret;
1702 /* Secondary interface's bus number and devfn 0 */
1703 parent = pdev->bus->self;
1704 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001705 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf92009-04-04 01:45:37 +01001706 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001707 if (!ret)
1708 return ret;
1709 parent = parent->bus->self;
1710 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001711 if (pci_is_pcie(tmp))
David Woodhouse276dbf92009-04-04 01:45:37 +01001712 return device_context_mapped(iommu, tmp->subordinate->number,
1713 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001714 else
David Woodhouse276dbf92009-04-04 01:45:37 +01001715 return device_context_mapped(iommu, tmp->bus->number,
1716 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001717}
1718
Fenghua Yuf5329592009-08-04 15:09:37 -07001719/* Returns a number of VTD pages, but aligned to MM page size */
1720static inline unsigned long aligned_nrpages(unsigned long host_addr,
1721 size_t size)
1722{
1723 host_addr &= ~PAGE_MASK;
1724 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1725}
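/*
 * Worked example (4KiB MM pages, so MM and VTD page sizes coincide):
 * host_addr == 0x1234 and size == 0x2000 leave a sub-page offset of
 * 0x234; PAGE_ALIGN(0x234 + 0x2000) == 0x3000, so three VTD pages are
 * needed to cover the buffer.
 */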
1726
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001727/* Return largest possible superpage level for a given mapping */
1728static inline int hardware_largepage_caps(struct dmar_domain *domain,
1729 unsigned long iov_pfn,
1730 unsigned long phy_pfn,
1731 unsigned long pages)
1732{
1733 int support, level = 1;
1734 unsigned long pfnmerge;
1735
1736 support = domain->iommu_superpage;
1737
1738 /* To use a large page, the virtual *and* physical addresses
1739 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1740 of them will mean we have to use smaller pages. So just
1741 merge them and check both at once. */
1742 pfnmerge = iov_pfn | phy_pfn;
1743
1744 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1745 pages >>= VTD_STRIDE_SHIFT;
1746 if (!pages)
1747 break;
1748 pfnmerge >>= VTD_STRIDE_SHIFT;
1749 level++;
1750 support--;
1751 }
1752 return level;
1753}
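/*
 * Worked example, assuming VTD_STRIDE_SHIFT == 9: a request for 1024
 * pages at iov_pfn == 0x200 and phy_pfn == 0x400 merges to pfnmerge ==
 * 0x600, whose low nine bits are clear, so level 2 (2MiB superpages)
 * is usable provided domain->iommu_superpage is at least 1; misaligned
 * or shorter requests stay at level 1.
 */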
1754
David Woodhouse9051aa02009-06-29 12:30:54 +01001755static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1756 struct scatterlist *sg, unsigned long phys_pfn,
1757 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001758{
1759 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001760 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001761 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001762 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001763 unsigned int largepage_lvl = 0;
1764 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001765
1766 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1767
1768 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1769 return -EINVAL;
1770
1771 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1772
David Woodhouse9051aa02009-06-29 12:30:54 +01001773 if (sg)
1774 sg_res = 0;
1775 else {
1776 sg_res = nr_pages + 1;
1777 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1778 }
1779
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001780 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001781 uint64_t tmp;
1782
David Woodhousee1605492009-06-29 11:17:38 +01001783 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001784 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001785 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1786 sg->dma_length = sg->length;
1787 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001788 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001789 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001790
David Woodhousee1605492009-06-29 11:17:38 +01001791 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001792 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1793
1794 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001795 if (!pte)
1796 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001797			/* It is a large page */
1798 if (largepage_lvl > 1)
1799 pteval |= DMA_PTE_LARGE_PAGE;
1800 else
1801 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1802
David Woodhousee1605492009-06-29 11:17:38 +01001803 }
1804		/* We don't need a lock here; nobody else
1805 * touches the iova range
1806 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001807 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001808 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001809 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001810 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1811 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001812 if (dumps) {
1813 dumps--;
1814 debug_dma_dump_mappings(NULL);
1815 }
1816 WARN_ON(1);
1817 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001818
1819 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1820
1821 BUG_ON(nr_pages < lvl_pages);
1822 BUG_ON(sg_res < lvl_pages);
1823
1824 nr_pages -= lvl_pages;
1825 iov_pfn += lvl_pages;
1826 phys_pfn += lvl_pages;
1827 pteval += lvl_pages * VTD_PAGE_SIZE;
1828 sg_res -= lvl_pages;
1829
1830 /* If the next PTE would be the first in a new page, then we
1831 need to flush the cache on the entries we've just written.
1832 And then we'll need to recalculate 'pte', so clear it and
1833 let it get set again in the if (!pte) block above.
1834
1835 If we're done (!nr_pages) we need to flush the cache too.
1836
1837 Also if we've been setting superpages, we may need to
1838 recalculate 'pte' and switch back to smaller pages for the
1839 end of the mapping, if the trailing size is not enough to
1840 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001841 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001842 if (!nr_pages || first_pte_in_page(pte) ||
1843 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001844 domain_flush_cache(domain, first_pte,
1845 (void *)pte - (void *)first_pte);
1846 pte = NULL;
1847 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001848
1849 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001850 sg = sg_next(sg);
1851 }
1852 return 0;
1853}
1854
David Woodhouse9051aa02009-06-29 12:30:54 +01001855static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1856 struct scatterlist *sg, unsigned long nr_pages,
1857 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001858{
David Woodhouse9051aa02009-06-29 12:30:54 +01001859 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1860}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001861
David Woodhouse9051aa02009-06-29 12:30:54 +01001862static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1863 unsigned long phys_pfn, unsigned long nr_pages,
1864 int prot)
1865{
1866 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001867}
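/*
 * A minimal usage sketch (this is how iommu_domain_identity_map()
 * below drives it): convert the range bounds to VTD page frames and
 * map them 1:1,
 *
 *	unsigned long first = start >> VTD_PAGE_SHIFT;
 *	unsigned long last = end >> VTD_PAGE_SHIFT;
 *
 *	domain_pfn_mapping(domain, first, first, last - first + 1,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 */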
1868
Weidong Hanc7151a82008-12-08 22:51:37 +08001869static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001870{
Weidong Hanc7151a82008-12-08 22:51:37 +08001871 if (!iommu)
1872 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001873
1874 clear_context_table(iommu, bus, devfn);
1875 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001876 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001877 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001878}
1879
1880static void domain_remove_dev_info(struct dmar_domain *domain)
1881{
1882 struct device_domain_info *info;
1883 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001884 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001885
1886 spin_lock_irqsave(&device_domain_lock, flags);
1887 while (!list_empty(&domain->devices)) {
1888 info = list_entry(domain->devices.next,
1889 struct device_domain_info, link);
1890 list_del(&info->link);
1891 list_del(&info->global);
1892 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001893 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001894 spin_unlock_irqrestore(&device_domain_lock, flags);
1895
Yu Zhao93a23a72009-05-18 13:51:37 +08001896 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf92009-04-04 01:45:37 +01001897 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001898 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001899 free_devinfo_mem(info);
1900
1901 spin_lock_irqsave(&device_domain_lock, flags);
1902 }
1903 spin_unlock_irqrestore(&device_domain_lock, flags);
1904}
1905
1906/*
1907 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001908 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001909 */
Kay, Allen M38717942008-09-09 18:37:29 +03001910static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001911find_domain(struct pci_dev *pdev)
1912{
1913 struct device_domain_info *info;
1914
1915 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001916 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001917 if (info)
1918 return info->domain;
1919 return NULL;
1920}
1921
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001922/* domain is initialized */
1923static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1924{
1925 struct dmar_domain *domain, *found = NULL;
1926 struct intel_iommu *iommu;
1927 struct dmar_drhd_unit *drhd;
1928 struct device_domain_info *info, *tmp;
1929 struct pci_dev *dev_tmp;
1930 unsigned long flags;
1931 int bus = 0, devfn = 0;
David Woodhouse276dbf92009-04-04 01:45:37 +01001932 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001933 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001934
1935 domain = find_domain(pdev);
1936 if (domain)
1937 return domain;
1938
David Woodhouse276dbf92009-04-04 01:45:37 +01001939 segment = pci_domain_nr(pdev->bus);
1940
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001941 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1942 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001943 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001944 bus = dev_tmp->subordinate->number;
1945 devfn = 0;
1946 } else {
1947 bus = dev_tmp->bus->number;
1948 devfn = dev_tmp->devfn;
1949 }
1950 spin_lock_irqsave(&device_domain_lock, flags);
1951 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf92009-04-04 01:45:37 +01001952 if (info->segment == segment &&
1953 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001954 found = info->domain;
1955 break;
1956 }
1957 }
1958 spin_unlock_irqrestore(&device_domain_lock, flags);
1959		/* pcie-pci bridge already has a domain, use it */
1960 if (found) {
1961 domain = found;
1962 goto found_domain;
1963 }
1964 }
1965
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001966 domain = alloc_domain();
1967 if (!domain)
1968 goto error;
1969
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001970 /* Allocate new domain for the device */
1971 drhd = dmar_find_matched_drhd_unit(pdev);
1972 if (!drhd) {
1973 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1974 pci_name(pdev));
1975 return NULL;
1976 }
1977 iommu = drhd->iommu;
1978
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001979 ret = iommu_attach_domain(domain, iommu);
1980 if (ret) {
Alex Williamson2fe97232011-03-04 14:52:30 -07001981 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001982 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001983 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001984
1985 if (domain_init(domain, gaw)) {
1986 domain_exit(domain);
1987 goto error;
1988 }
1989
1990 /* register pcie-to-pci device */
1991 if (dev_tmp) {
1992 info = alloc_devinfo_mem();
1993 if (!info) {
1994 domain_exit(domain);
1995 goto error;
1996 }
David Woodhouse276dbf92009-04-04 01:45:37 +01001997 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001998 info->bus = bus;
1999 info->devfn = devfn;
2000 info->dev = NULL;
2001 info->domain = domain;
2002		/* This domain is shared by devices under the p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002003 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004
2005		/* pcie-to-pci bridge already has a domain, use it */
2006 found = NULL;
2007 spin_lock_irqsave(&device_domain_lock, flags);
2008 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf92009-04-04 01:45:37 +01002009 if (tmp->segment == segment &&
2010 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002011 found = tmp->domain;
2012 break;
2013 }
2014 }
2015 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002016 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002017 free_devinfo_mem(info);
2018 domain_exit(domain);
2019 domain = found;
2020 } else {
2021 list_add(&info->link, &domain->devices);
2022 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002023 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002024 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002025 }
2026
2027found_domain:
2028 info = alloc_devinfo_mem();
2029 if (!info)
2030 goto error;
David Woodhouse276dbf92009-04-04 01:45:37 +01002031 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002032 info->bus = pdev->bus->number;
2033 info->devfn = pdev->devfn;
2034 info->dev = pdev;
2035 info->domain = domain;
2036 spin_lock_irqsave(&device_domain_lock, flags);
2037 /* somebody is fast */
2038 found = find_domain(pdev);
2039 if (found != NULL) {
2040 spin_unlock_irqrestore(&device_domain_lock, flags);
2041 if (found != domain) {
2042 domain_exit(domain);
2043 domain = found;
2044 }
2045 free_devinfo_mem(info);
2046 return domain;
2047 }
2048 list_add(&info->link, &domain->devices);
2049 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002050 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002051 spin_unlock_irqrestore(&device_domain_lock, flags);
2052 return domain;
2053error:
2054 /* recheck it here, maybe others set it */
2055 return find_domain(pdev);
2056}
2057
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002058static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002059#define IDENTMAP_ALL 1
2060#define IDENTMAP_GFX 2
2061#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002062
David Woodhouseb2132032009-06-26 18:50:28 +01002063static int iommu_domain_identity_map(struct dmar_domain *domain,
2064 unsigned long long start,
2065 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002066{
David Woodhousec5395d52009-06-28 16:35:56 +01002067 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2068 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002069
David Woodhousec5395d52009-06-28 16:35:56 +01002070 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2071 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002072 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002073 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002074 }
2075
David Woodhousec5395d52009-06-28 16:35:56 +01002076 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2077 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002078 /*
2079 * RMRR range might have overlap with physical memory range,
2080 * clear it first
2081 */
David Woodhousec5395d52009-06-28 16:35:56 +01002082 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002083
David Woodhousec5395d52009-06-28 16:35:56 +01002084 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2085 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002086 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002087}
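/*
 * E.g. a hypothetical RMRR covering 0xe0000-0xfffff yields first_vpfn
 * == 0xe0 and last_vpfn == 0xff: 32 pages mapped 1:1 after any stale
 * PTEs in that range have been cleared.
 */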
2088
2089static int iommu_prepare_identity_map(struct pci_dev *pdev,
2090 unsigned long long start,
2091 unsigned long long end)
2092{
2093 struct dmar_domain *domain;
2094 int ret;
2095
David Woodhousec7ab48d2009-06-26 19:10:36 +01002096 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002097 if (!domain)
2098 return -ENOMEM;
2099
David Woodhouse19943b02009-08-04 16:19:20 +01002100 /* For _hardware_ passthrough, don't bother. But for software
2101 passthrough, we do it anyway -- it may indicate a memory
2102	   range which is reserved in E820 and so didn't get set
2103	   up in si_domain to start with */
2104 if (domain == si_domain && hw_pass_through) {
2105		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2106 pci_name(pdev), start, end);
2107 return 0;
2108 }
2109
2110 printk(KERN_INFO
2111 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2112 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002113
David Woodhouse5595b522009-12-02 09:21:55 +00002114 if (end < start) {
2115 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2116 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2117 dmi_get_system_info(DMI_BIOS_VENDOR),
2118 dmi_get_system_info(DMI_BIOS_VERSION),
2119 dmi_get_system_info(DMI_PRODUCT_VERSION));
2120 ret = -EIO;
2121 goto error;
2122 }
2123
David Woodhouse2ff729f2009-08-26 14:25:41 +01002124 if (end >> agaw_to_width(domain->agaw)) {
2125 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2126 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2127 agaw_to_width(domain->agaw),
2128 dmi_get_system_info(DMI_BIOS_VENDOR),
2129 dmi_get_system_info(DMI_BIOS_VERSION),
2130 dmi_get_system_info(DMI_PRODUCT_VERSION));
2131 ret = -EIO;
2132 goto error;
2133 }
David Woodhouse19943b02009-08-04 16:19:20 +01002134
David Woodhouseb2132032009-06-26 18:50:28 +01002135 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002136 if (ret)
2137 goto error;
2138
2139 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002140 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002141 if (ret)
2142 goto error;
2143
2144 return 0;
2145
2146 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002147 domain_exit(domain);
2148 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002149}
2150
2151static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2152 struct pci_dev *pdev)
2153{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002154 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002155 return 0;
2156 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002157 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002158}
2159
Suresh Siddhad3f13812011-08-23 17:05:25 -07002160#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002161static inline void iommu_prepare_isa(void)
2162{
2163 struct pci_dev *pdev;
2164 int ret;
2165
2166 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2167 if (!pdev)
2168 return;
2169
David Woodhousec7ab48d2009-06-26 19:10:36 +01002170 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002171 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002172
2173 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002174 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2175 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002176
2177}
2178#else
2179static inline void iommu_prepare_isa(void)
2180{
2181 return;
2182}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002183#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002184
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002185static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002186
2187static int __init si_domain_work_fn(unsigned long start_pfn,
2188 unsigned long end_pfn, void *datax)
2189{
2190 int *ret = datax;
2191
2192 *ret = iommu_domain_identity_map(si_domain,
2193 (uint64_t)start_pfn << PAGE_SHIFT,
2194 (uint64_t)end_pfn << PAGE_SHIFT);
2195 return *ret;
2196
2197}
2198
Matt Kraai071e1372009-08-23 22:30:22 -07002199static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002200{
2201 struct dmar_drhd_unit *drhd;
2202 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002203 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002204
2205 si_domain = alloc_domain();
2206 if (!si_domain)
2207 return -EFAULT;
2208
David Woodhousec7ab48d2009-06-26 19:10:36 +01002209 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002210
2211 for_each_active_iommu(iommu, drhd) {
2212 ret = iommu_attach_domain(si_domain, iommu);
2213 if (ret) {
2214 domain_exit(si_domain);
2215 return -EFAULT;
2216 }
2217 }
2218
2219 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2220 domain_exit(si_domain);
2221 return -EFAULT;
2222 }
2223
2224 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2225
David Woodhouse19943b02009-08-04 16:19:20 +01002226 if (hw)
2227 return 0;
2228
David Woodhousec7ab48d2009-06-26 19:10:36 +01002229 for_each_online_node(nid) {
2230 work_with_active_regions(nid, si_domain_work_fn, &ret);
2231 if (ret)
2232 return ret;
2233 }
2234
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002235 return 0;
2236}
2237
2238static void domain_remove_one_dev_info(struct dmar_domain *domain,
2239 struct pci_dev *pdev);
2240static int identity_mapping(struct pci_dev *pdev)
2241{
2242 struct device_domain_info *info;
2243
2244 if (likely(!iommu_identity_mapping))
2245 return 0;
2246
Mike Traviscb452a42011-05-28 13:15:03 -05002247 info = pdev->dev.archdata.iommu;
2248 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2249 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002250
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002251 return 0;
2252}
2253
2254static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002255 struct pci_dev *pdev,
2256 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002257{
2258 struct device_domain_info *info;
2259 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002260 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002261
2262 info = alloc_devinfo_mem();
2263 if (!info)
2264 return -ENOMEM;
2265
David Woodhouse5fe60f42009-08-09 10:53:41 +01002266 ret = domain_context_mapping(domain, pdev, translation);
2267 if (ret) {
2268 free_devinfo_mem(info);
2269 return ret;
2270 }
2271
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002272 info->segment = pci_domain_nr(pdev->bus);
2273 info->bus = pdev->bus->number;
2274 info->devfn = pdev->devfn;
2275 info->dev = pdev;
2276 info->domain = domain;
2277
2278 spin_lock_irqsave(&device_domain_lock, flags);
2279 list_add(&info->link, &domain->devices);
2280 list_add(&info->global, &device_domain_list);
2281 pdev->dev.archdata.iommu = info;
2282 spin_unlock_irqrestore(&device_domain_lock, flags);
2283
2284 return 0;
2285}
2286
David Woodhouse6941af22009-07-04 18:24:27 +01002287static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2288{
David Woodhousee0fc7e02009-09-30 09:12:17 -07002289 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2290 return 1;
2291
2292 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2293 return 1;
2294
2295 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2296 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002297
David Woodhouse3dfc8132009-07-04 19:11:08 +01002298 /*
2299 * We want to start off with all devices in the 1:1 domain, and
2300 * take them out later if we find they can't access all of memory.
2301 *
2302 * However, we can't do this for PCI devices behind bridges,
2303 * because all PCI devices behind the same bridge will end up
2304 * with the same source-id on their transactions.
2305 *
2306 * Practically speaking, we can't change things around for these
2307 * devices at run-time, because we can't be sure there'll be no
2308 * DMA transactions in flight for any of their siblings.
2309 *
2310 * So PCI devices (unless they're on the root bus) as well as
2311 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2312 * the 1:1 domain, just in _case_ one of their siblings turns out
2313 * not to be able to map all of memory.
2314 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002315 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002316 if (!pci_is_root_bus(pdev->bus))
2317 return 0;
2318 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2319 return 0;
2320 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2321 return 0;
2322
2323 /*
2324 * At boot time, we don't yet know if devices will be 64-bit capable.
2325 * Assume that they will -- if they turn out not to be, then we can
2326 * take them out of the 1:1 domain later.
2327 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002328 if (!startup) {
2329 /*
2330 * If the device's dma_mask is less than the system's memory
2331 * size then this is not a candidate for identity mapping.
2332 */
2333 u64 dma_mask = pdev->dma_mask;
2334
2335 if (pdev->dev.coherent_dma_mask &&
2336 pdev->dev.coherent_dma_mask < dma_mask)
2337 dma_mask = pdev->dev.coherent_dma_mask;
2338
2339 return dma_mask >= dma_get_required_mask(&pdev->dev);
2340 }
David Woodhouse6941af22009-07-04 18:24:27 +01002341
2342 return 1;
2343}
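/*
 * E.g. once DMA masks are known (startup == 0), a device limited to
 * DMA_BIT_MASK(32) on a machine with RAM above 4GiB fails the
 * dma_get_required_mask() check above and is kept out of (or later
 * removed from) the 1:1 domain.
 */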
2344
Matt Kraai071e1372009-08-23 22:30:22 -07002345static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002346{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002347 struct pci_dev *pdev = NULL;
2348 int ret;
2349
David Woodhouse19943b02009-08-04 16:19:20 +01002350 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002351 if (ret)
2352 return -EFAULT;
2353
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002354 for_each_pci_dev(pdev) {
Mike Travis825507d2011-05-28 13:15:06 -05002355 /* Skip Host/PCI Bridge devices */
2356 if (IS_BRIDGE_HOST_DEVICE(pdev))
2357 continue;
David Woodhouse6941af22009-07-04 18:24:27 +01002358 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse19943b02009-08-04 16:19:20 +01002359 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2360 hw ? "hardware" : "software", pci_name(pdev));
David Woodhousec7ab48d2009-06-26 19:10:36 +01002361
David Woodhouse5fe60f42009-08-09 10:53:41 +01002362 ret = domain_add_dev_info(si_domain, pdev,
David Woodhouse19943b02009-08-04 16:19:20 +01002363 hw ? CONTEXT_TT_PASS_THROUGH :
David Woodhouse62edf5d2009-07-04 10:59:46 +01002364 CONTEXT_TT_MULTI_LEVEL);
2365 if (ret)
2366 return ret;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002367 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002368 }
2369
2370 return 0;
2371}
2372
Joseph Cihulab7792602011-05-03 00:08:37 -07002373static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002374{
2375 struct dmar_drhd_unit *drhd;
2376 struct dmar_rmrr_unit *rmrr;
2377 struct pci_dev *pdev;
2378 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002379 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002380
2381 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002382 * for each drhd
2383 * allocate root
2384 * initialize and program root entry to not present
2385 * endfor
2386 */
2387 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002388 g_num_of_iommus++;
2389 /*
2390		 * lock not needed as this is only incremented in the single-
2391		 * threaded kernel __init code path; all other accesses are read-
2392		 * only
2393 */
2394 }
2395
Weidong Hand9630fe2008-12-08 11:06:32 +08002396 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2397 GFP_KERNEL);
2398 if (!g_iommus) {
2399 printk(KERN_ERR "Allocating global iommu array failed\n");
2400 ret = -ENOMEM;
2401 goto error;
2402 }
2403
mark gross80b20dd2008-04-18 13:53:58 -07002404 deferred_flush = kzalloc(g_num_of_iommus *
2405 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2406 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002407 ret = -ENOMEM;
2408 goto error;
2409 }
2410
mark gross5e0d2a62008-03-04 15:22:08 -08002411 for_each_drhd_unit(drhd) {
2412 if (drhd->ignored)
2413 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002414
2415 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002416 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002417
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002418 ret = iommu_init_domains(iommu);
2419 if (ret)
2420 goto error;
2421
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002422 /*
2423 * TBD:
2424 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002425		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002426 */
2427 ret = iommu_alloc_root_entry(iommu);
2428 if (ret) {
2429 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2430 goto error;
2431 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002432 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002433 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002434 }
2435
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002436 /*
2437	 * Start from a sane iommu hardware state.
2438 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002439 for_each_drhd_unit(drhd) {
2440 if (drhd->ignored)
2441 continue;
2442
2443 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002444
2445 /*
2446 * If the queued invalidation is already initialized by us
2447 * (for example, while enabling interrupt-remapping) then
2448		 * things are already rolling from a sane state.
2449 */
2450 if (iommu->qi)
2451 continue;
2452
2453 /*
2454 * Clear any previous faults.
2455 */
2456 dmar_fault(-1, iommu);
2457 /*
2458 * Disable queued invalidation if supported and already enabled
2459 * before OS handover.
2460 */
2461 dmar_disable_qi(iommu);
2462 }
2463
2464 for_each_drhd_unit(drhd) {
2465 if (drhd->ignored)
2466 continue;
2467
2468 iommu = drhd->iommu;
2469
Youquan Songa77b67d2008-10-16 16:31:56 -07002470 if (dmar_enable_qi(iommu)) {
2471 /*
2472 * Queued Invalidate not enabled, use Register Based
2473 * Invalidate
2474 */
2475 iommu->flush.flush_context = __iommu_flush_context;
2476 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002477 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002478 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002479 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002480 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002481 } else {
2482 iommu->flush.flush_context = qi_flush_context;
2483 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002484 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002485 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002486 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002487 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002488 }
2489 }
2490
David Woodhouse19943b02009-08-04 16:19:20 +01002491 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002492 iommu_identity_mapping |= IDENTMAP_ALL;
2493
Suresh Siddhad3f13812011-08-23 17:05:25 -07002494#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002495 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002496#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002497
2498 check_tylersburg_isoch();
2499
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002500 /*
2501	 * If pass through is not set or not enabled, set up context entries for
2502	 * identity mappings for rmrr, gfx and isa, and fall back to static
2503	 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002504 */
David Woodhouse19943b02009-08-04 16:19:20 +01002505 if (iommu_identity_mapping) {
2506 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2507 if (ret) {
2508 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2509 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002510 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002511 }
David Woodhouse19943b02009-08-04 16:19:20 +01002512 /*
2513 * For each rmrr
2514 * for each dev attached to rmrr
2515 * do
2516 * locate drhd for dev, alloc domain for dev
2517 * allocate free domain
2518 * allocate page table entries for rmrr
2519 * if context not allocated for bus
2520 * allocate and init context
2521 * set present in root table for this bus
2522 * init context with domain, translation etc
2523 * endfor
2524 * endfor
2525 */
2526 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2527 for_each_rmrr_units(rmrr) {
2528 for (i = 0; i < rmrr->devices_cnt; i++) {
2529 pdev = rmrr->devices[i];
2530 /*
2531			 * some BIOSes list non-existent devices in the
2532			 * DMAR table.
2533 */
2534 if (!pdev)
2535 continue;
2536 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2537 if (ret)
2538 printk(KERN_ERR
2539 "IOMMU: mapping reserved region failed\n");
2540 }
2541 }
2542
2543 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002544
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002545 /*
2546 * for each drhd
2547 * enable fault log
2548 * global invalidate context cache
2549 * global invalidate iotlb
2550 * enable translation
2551 */
2552 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002553 if (drhd->ignored) {
2554 /*
2555 * we always have to disable PMRs or DMA may fail on
2556 * this device
2557 */
2558 if (force_on)
2559 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002560 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002561 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002562 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002563
2564 iommu_flush_write_buffer(iommu);
2565
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002566 ret = dmar_set_interrupt(iommu);
2567 if (ret)
2568 goto error;
2569
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002570 iommu_set_root_entry(iommu);
2571
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002572 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002573 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002574
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002575 ret = iommu_enable_translation(iommu);
2576 if (ret)
2577 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002578
2579 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002580 }
2581
2582 return 0;
2583error:
2584 for_each_drhd_unit(drhd) {
2585 if (drhd->ignored)
2586 continue;
2587 iommu = drhd->iommu;
2588 free_iommu(iommu);
2589 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002590 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002591 return ret;
2592}
2593
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002594/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002595static struct iova *intel_alloc_iova(struct device *dev,
2596 struct dmar_domain *domain,
2597 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002598{
2599 struct pci_dev *pdev = to_pci_dev(dev);
2600 struct iova *iova = NULL;
2601
David Woodhouse875764d2009-06-28 21:20:51 +01002602 /* Restrict dma_mask to the width that the iommu can handle */
2603 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2604
2605 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002606 /*
2607 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002608 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002609 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002610 */
David Woodhouse875764d2009-06-28 21:20:51 +01002611 iova = alloc_iova(&domain->iovad, nrpages,
2612 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2613 if (iova)
2614 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002615 }
David Woodhouse875764d2009-06-28 21:20:51 +01002616 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2617 if (unlikely(!iova)) {
2618		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2619 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002620 return NULL;
2621 }
2622
2623 return iova;
2624}
2625
David Woodhouse147202a2009-07-07 19:43:20 +01002626static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002627{
2628 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002629 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002630
2631 domain = get_domain_for_dev(pdev,
2632 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2633 if (!domain) {
2634 printk(KERN_ERR
2635		       "Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002636 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002637 }
2638
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002639 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002640 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002641 ret = domain_context_mapping(domain, pdev,
2642 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002643 if (ret) {
2644 printk(KERN_ERR
2645			       "Domain context map for %s failed\n",
2646 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002647 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002648 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002649 }
2650
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002651 return domain;
2652}
2653
David Woodhouse147202a2009-07-07 19:43:20 +01002654static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2655{
2656 struct device_domain_info *info;
2657
2658 /* No lock here, assumes no domain exit in normal case */
2659 info = dev->dev.archdata.iommu;
2660 if (likely(info))
2661 return info->domain;
2662
2663 return __get_valid_domain_for_dev(dev);
2664}
2665
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002666static int iommu_dummy(struct pci_dev *pdev)
2667{
2668 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2669}
2670
2671/* Check if the pdev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002672static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002673{
David Woodhouse73676832009-07-04 14:08:36 +01002674 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002675 int found;
2676
David Woodhouse73676832009-07-04 14:08:36 +01002677 if (unlikely(dev->bus != &pci_bus_type))
2678 return 1;
2679
2680 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002681 if (iommu_dummy(pdev))
2682 return 1;
2683
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002684 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002685 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002686
2687 found = identity_mapping(pdev);
2688 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002689 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002690 return 1;
2691 else {
2692 /*
2693			 * The device is only 32-bit DMA capable: remove it
2694			 * from si_domain and fall back to non-identity mapping.
2695 */
2696 domain_remove_one_dev_info(si_domain, pdev);
2697 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2698 pci_name(pdev));
2699 return 0;
2700 }
2701 } else {
2702 /*
2703			 * A 64-bit DMA device that was detached from a VM is
2704			 * put back into si_domain for identity mapping.
2705 */
David Woodhouse6941af22009-07-04 18:24:27 +01002706 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002707 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002708 ret = domain_add_dev_info(si_domain, pdev,
2709 hw_pass_through ?
2710 CONTEXT_TT_PASS_THROUGH :
2711 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002712 if (!ret) {
2713 printk(KERN_INFO "64bit %s uses identity mapping\n",
2714 pci_name(pdev));
2715 return 1;
2716 }
2717 }
2718 }
2719
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002720 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002721}
2722
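/*
 * Map paddr..paddr+size for DMA on behalf of @hwdev: allocate an iova
 * below @dma_mask, program the page tables with permissions derived from
 * @dir, and return the resulting bus address (0 on failure).
 */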
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002723static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2724 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002725{
2726 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002727 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002728 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002729 struct iova *iova;
2730 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002731 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002732 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002733 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002734
2735 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002736
David Woodhouse73676832009-07-04 14:08:36 +01002737 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002738 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002739
2740 domain = get_valid_domain_for_dev(pdev);
2741 if (!domain)
2742 return 0;
2743
Weidong Han8c11e792008-12-08 15:29:22 +08002744 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002745 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002746
Mike Travisc681d0b2011-05-28 13:15:05 -05002747 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002748 if (!iova)
2749 goto error;
2750
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002751 /*
2752	 * Check if DMAR supports zero-length reads on write-only
2753	 * mappings.
2754 */
2755	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08002756 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002757 prot |= DMA_PTE_READ;
2758 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2759 prot |= DMA_PTE_WRITE;
2760 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002761	 * paddr..(paddr + size) might span a partial page; we should map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002762	 * whole page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002763	 * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002764	 * is not a big problem.
2765 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002766 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002767 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002768 if (ret)
2769 goto error;
2770
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002771	/* it's a non-present to present mapping; only flush if in caching mode */
2772 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002773 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002774 else
Weidong Han8c11e792008-12-08 15:29:22 +08002775 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002776
David Woodhouse03d6a242009-06-28 15:33:46 +01002777 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2778 start_paddr += paddr & ~PAGE_MASK;
2779 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002780
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002781error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002782 if (iova)
2783 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002784	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002785 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002786 return 0;
2787}
2788
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002789static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2790 unsigned long offset, size_t size,
2791 enum dma_data_direction dir,
2792 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002793{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002794 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2795 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002796}
2797
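/*
 * Flush the IOTLB of every IOMMU and free all iovas queued by add_unmap().
 * Must be called with async_umap_flush_lock held.
 */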
mark gross5e0d2a62008-03-04 15:22:08 -08002798static void flush_unmaps(void)
2799{
mark gross80b20dd2008-04-18 13:53:58 -07002800 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002801
mark gross5e0d2a62008-03-04 15:22:08 -08002802 timer_on = 0;
2803
2804 /* just flush them all */
2805 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002806 struct intel_iommu *iommu = g_iommus[i];
2807 if (!iommu)
2808 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002809
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002810 if (!deferred_flush[i].next)
2811 continue;
2812
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002813		/* In caching mode, global flushes make emulation expensive */
2814 if (!cap_caching_mode(iommu->cap))
2815 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002816 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002817 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002818 unsigned long mask;
2819 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002820 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002821
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002822 /* On real hardware multiple invalidations are expensive */
2823 if (cap_caching_mode(iommu->cap))
2824 iommu_flush_iotlb_psi(iommu, domain->id,
2825 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2826 else {
2827 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2828 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2829 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2830 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002831 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002832 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002833 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002834 }
2835
mark gross5e0d2a62008-03-04 15:22:08 -08002836 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002837}
2838
2839static void flush_unmaps_timeout(unsigned long data)
2840{
mark gross80b20dd2008-04-18 13:53:58 -07002841 unsigned long flags;
2842
2843 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002844 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002845 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002846}
2847
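/*
 * Defer freeing of @iova: queue it on the per-IOMMU list and arm
 * unmap_timer so that flush_unmaps() batches the IOTLB flushes, at the
 * latest 10ms from now or immediately once HIGH_WATER_MARK entries are
 * pending.
 */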
2848static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2849{
2850 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002851 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002852 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002853
2854 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002855 if (list_size == HIGH_WATER_MARK)
2856 flush_unmaps();
2857
Weidong Han8c11e792008-12-08 15:29:22 +08002858 iommu = domain_get_iommu(dom);
2859 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002860
mark gross80b20dd2008-04-18 13:53:58 -07002861 next = deferred_flush[iommu_id].next;
2862 deferred_flush[iommu_id].domain[next] = dom;
2863 deferred_flush[iommu_id].iova[next] = iova;
2864 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002865
2866 if (!timer_on) {
2867 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2868 timer_on = 1;
2869 }
2870 list_size++;
2871 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2872}
2873
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002874static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2875 size_t size, enum dma_data_direction dir,
2876 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002877{
2878 struct pci_dev *pdev = to_pci_dev(dev);
2879 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002880 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002881 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002882 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002883
David Woodhouse73676832009-07-04 14:08:36 +01002884 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002885 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002886
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002887 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002888 BUG_ON(!domain);
2889
Weidong Han8c11e792008-12-08 15:29:22 +08002890 iommu = domain_get_iommu(domain);
2891
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002892 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002893 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2894 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002895 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002896
David Woodhoused794dc92009-06-28 00:27:49 +01002897 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2898 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002899
David Woodhoused794dc92009-06-28 00:27:49 +01002900 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2901 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002902
2903 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002904 dma_pte_clear_range(domain, start_pfn, last_pfn);
2905
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002906 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002907 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2908
mark gross5e0d2a62008-03-04 15:22:08 -08002909 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002910 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002911 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002912 /* free iova */
2913 __free_iova(&domain->iovad, iova);
2914 } else {
2915 add_unmap(domain, iova);
2916 /*
2917		 * queue up the release of the unmap to save the ~1/6th of
2918		 * CPU time otherwise used up by the iotlb flush operation...
2919 */
mark gross5e0d2a62008-03-04 15:22:08 -08002920 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002921}
2922
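/*
 * DMA-coherent allocation: when the device goes through the IOMMU any
 * page will do; otherwise honour coherent_dma_mask via GFP_DMA/GFP_DMA32.
 */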
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002923static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2924 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002925{
2926 void *vaddr;
2927 int order;
2928
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002929 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002930 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002931
2932 if (!iommu_no_mapping(hwdev))
2933 flags &= ~(GFP_DMA | GFP_DMA32);
2934 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2935 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2936 flags |= GFP_DMA;
2937 else
2938 flags |= GFP_DMA32;
2939 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002940
2941 vaddr = (void *)__get_free_pages(flags, order);
2942 if (!vaddr)
2943 return NULL;
2944 memset(vaddr, 0, size);
2945
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002946 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2947 DMA_BIDIRECTIONAL,
2948 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002949 if (*dma_handle)
2950 return vaddr;
2951 free_pages((unsigned long)vaddr, order);
2952 return NULL;
2953}
2954
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002955static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2956 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002957{
2958 int order;
2959
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002960 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002961 order = get_order(size);
2962
David Woodhouse0db9b7a2009-07-14 02:01:57 +01002963 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002964 free_pages((unsigned long)vaddr, order);
2965}
2966
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002967static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2968 int nelems, enum dma_data_direction dir,
2969 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002970{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002971 struct pci_dev *pdev = to_pci_dev(hwdev);
2972 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002973 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002974 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002975 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002976
David Woodhouse73676832009-07-04 14:08:36 +01002977 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002978 return;
2979
2980 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002981 BUG_ON(!domain);
2982
2983 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002984
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002985 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002986 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2987 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002988 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002989
David Woodhoused794dc92009-06-28 00:27:49 +01002990 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2991 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002992
2993 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002994 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002995
David Woodhoused794dc92009-06-28 00:27:49 +01002996 /* free page tables */
2997 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2998
David Woodhouseacea0012009-07-14 01:55:11 +01002999 if (intel_iommu_strict) {
3000 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003001 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003002 /* free iova */
3003 __free_iova(&domain->iovad, iova);
3004 } else {
3005 add_unmap(domain, iova);
3006 /*
3007		 * queue up the release of the unmap to save the ~1/6th of
3008		 * CPU time otherwise used up by the iotlb flush operation...
3009 */
3010 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003011}
3012
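/*
 * Identity path for scatterlists: no IOMMU translation, each segment's
 * DMA address is simply its physical address.
 */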
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003013static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003014 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003015{
3016 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003017 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003018
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003019 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003020 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003021 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003022 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003023 }
3024 return nelems;
3025}
3026
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003027static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3028 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003029{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003030 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003031 struct pci_dev *pdev = to_pci_dev(hwdev);
3032 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003033 size_t size = 0;
3034 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003035 struct iova *iova = NULL;
3036 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003037 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003038 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003039 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003040
3041 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003042 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003043 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003044
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003045 domain = get_valid_domain_for_dev(pdev);
3046 if (!domain)
3047 return 0;
3048
Weidong Han8c11e792008-12-08 15:29:22 +08003049 iommu = domain_get_iommu(domain);
3050
David Woodhouseb536d242009-06-28 14:49:31 +01003051 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003052 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003053
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003054 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3055 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003056 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003057 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003058 return 0;
3059 }
3060
3061 /*
3062	 * Check if DMAR supports zero-length reads on write-only
3063	 * mappings.
3064 */
3065	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003066 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003067 prot |= DMA_PTE_READ;
3068 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3069 prot |= DMA_PTE_WRITE;
3070
David Woodhouseb536d242009-06-28 14:49:31 +01003071 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003072
Fenghua Yuf5329592009-08-04 15:09:37 -07003073 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003074 if (unlikely(ret)) {
3075 /* clear the page */
3076 dma_pte_clear_range(domain, start_vpfn,
3077 start_vpfn + size - 1);
3078 /* free page tables */
3079 dma_pte_free_pagetable(domain, start_vpfn,
3080 start_vpfn + size - 1);
3081 /* free iova */
3082 __free_iova(&domain->iovad, iova);
3083 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003084 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003085
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003086	/* it's a non-present to present mapping; only flush if in caching mode */
3087 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003088 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003089 else
Weidong Han8c11e792008-12-08 15:29:22 +08003090 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003091
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003092 return nelems;
3093}
3094
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003095static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3096{
3097 return !dma_addr;
3098}
3099
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003100struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003101 .alloc_coherent = intel_alloc_coherent,
3102 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003103 .map_sg = intel_map_sg,
3104 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003105 .map_page = intel_map_page,
3106 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003107 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003108};
3109
3110static inline int iommu_domain_cache_init(void)
3111{
3112 int ret = 0;
3113
3114 iommu_domain_cache = kmem_cache_create("iommu_domain",
3115 sizeof(struct dmar_domain),
3116 0,
3117 SLAB_HWCACHE_ALIGN,
3119 NULL);
3120 if (!iommu_domain_cache) {
3121 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3122 ret = -ENOMEM;
3123 }
3124
3125 return ret;
3126}
3127
3128static inline int iommu_devinfo_cache_init(void)
3129{
3130 int ret = 0;
3131
3132 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3133 sizeof(struct device_domain_info),
3134 0,
3135 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003136 NULL);
3137 if (!iommu_devinfo_cache) {
3138 printk(KERN_ERR "Couldn't create devinfo cache\n");
3139 ret = -ENOMEM;
3140 }
3141
3142 return ret;
3143}
3144
3145static inline int iommu_iova_cache_init(void)
3146{
3147 int ret = 0;
3148
3149 iommu_iova_cache = kmem_cache_create("iommu_iova",
3150 sizeof(struct iova),
3151 0,
3152 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003153 NULL);
3154 if (!iommu_iova_cache) {
3155 printk(KERN_ERR "Couldn't create iova cache\n");
3156 ret = -ENOMEM;
3157 }
3158
3159 return ret;
3160}
3161
3162static int __init iommu_init_mempool(void)
3163{
3164 int ret;
3165 ret = iommu_iova_cache_init();
3166 if (ret)
3167 return ret;
3168
3169 ret = iommu_domain_cache_init();
3170 if (ret)
3171 goto domain_error;
3172
3173 ret = iommu_devinfo_cache_init();
3174 if (!ret)
3175 return ret;
3176
3177 kmem_cache_destroy(iommu_domain_cache);
3178domain_error:
3179 kmem_cache_destroy(iommu_iova_cache);
3180
3181 return -ENOMEM;
3182}
3183
3184static void __init iommu_exit_mempool(void)
3185{
3186 kmem_cache_destroy(iommu_devinfo_cache);
3187 kmem_cache_destroy(iommu_domain_cache);
3188 kmem_cache_destroy(iommu_iova_cache);
3189
3190}
3191
Dan Williams556ab452010-07-23 15:47:56 -07003192static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3193{
3194 struct dmar_drhd_unit *drhd;
3195 u32 vtbar;
3196 int rc;
3197
3198 /* We know that this device on this chipset has its own IOMMU.
3199 * If we find it under a different IOMMU, then the BIOS is lying
3200 * to us. Hope that the IOMMU for this device is actually
3201 * disabled, and it needs no translation...
3202 */
3203 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3204 if (rc) {
3205 /* "can't" happen */
3206 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3207 return;
3208 }
3209 vtbar &= 0xffff0000;
3210
3211	/* we know that this iommu should be at offset 0xa000 from vtbar */
3212 drhd = dmar_find_matched_drhd_unit(pdev);
3213 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3214 TAINT_FIRMWARE_WORKAROUND,
3215 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3216 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3217}
3218DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3219
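/*
 * Mark DMAR units that control no PCI devices, and units that control
 * only graphics devices when dmar_map_gfx is disabled, as ignored so
 * that the rest of initialisation skips them.
 */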
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003220static void __init init_no_remapping_devices(void)
3221{
3222 struct dmar_drhd_unit *drhd;
3223
3224 for_each_drhd_unit(drhd) {
3225 if (!drhd->include_all) {
3226 int i;
3227 for (i = 0; i < drhd->devices_cnt; i++)
3228 if (drhd->devices[i] != NULL)
3229 break;
3230 /* ignore DMAR unit if no pci devices exist */
3231 if (i == drhd->devices_cnt)
3232 drhd->ignored = 1;
3233 }
3234 }
3235
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003236 for_each_drhd_unit(drhd) {
3237 int i;
3238 if (drhd->ignored || drhd->include_all)
3239 continue;
3240
3241 for (i = 0; i < drhd->devices_cnt; i++)
3242 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003243 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003244 break;
3245
3246 if (i < drhd->devices_cnt)
3247 continue;
3248
David Woodhousec0771df2011-10-14 20:59:46 +01003249 /* This IOMMU has *only* gfx devices. Either bypass it or
3250 set the gfx_mapped flag, as appropriate */
3251 if (dmar_map_gfx) {
3252 intel_iommu_gfx_mapped = 1;
3253 } else {
3254 drhd->ignored = 1;
3255 for (i = 0; i < drhd->devices_cnt; i++) {
3256 if (!drhd->devices[i])
3257 continue;
3258 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3259 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003260 }
3261 }
3262}
3263
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003264#ifdef CONFIG_SUSPEND
3265static int init_iommu_hw(void)
3266{
3267 struct dmar_drhd_unit *drhd;
3268 struct intel_iommu *iommu = NULL;
3269
3270 for_each_active_iommu(iommu, drhd)
3271 if (iommu->qi)
3272 dmar_reenable_qi(iommu);
3273
Joseph Cihulab7792602011-05-03 00:08:37 -07003274 for_each_iommu(iommu, drhd) {
3275 if (drhd->ignored) {
3276 /*
3277 * we always have to disable PMRs or DMA may fail on
3278 * this device
3279 */
3280 if (force_on)
3281 iommu_disable_protect_mem_regions(iommu);
3282 continue;
3283 }
3284
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003285 iommu_flush_write_buffer(iommu);
3286
3287 iommu_set_root_entry(iommu);
3288
3289 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003290 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003291 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003292 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003293 if (iommu_enable_translation(iommu))
3294 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003295 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003296 }
3297
3298 return 0;
3299}
3300
3301static void iommu_flush_all(void)
3302{
3303 struct dmar_drhd_unit *drhd;
3304 struct intel_iommu *iommu;
3305
3306 for_each_active_iommu(iommu, drhd) {
3307 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003308 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003309 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003310 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003311 }
3312}
3313
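/*
 * Save the fault-event registers of every active IOMMU and disable
 * translation on the way down to suspend; iommu_resume() restores both.
 */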
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003314static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003315{
3316 struct dmar_drhd_unit *drhd;
3317 struct intel_iommu *iommu = NULL;
3318 unsigned long flag;
3319
3320 for_each_active_iommu(iommu, drhd) {
3321 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3322 GFP_ATOMIC);
3323 if (!iommu->iommu_state)
3324 goto nomem;
3325 }
3326
3327 iommu_flush_all();
3328
3329 for_each_active_iommu(iommu, drhd) {
3330 iommu_disable_translation(iommu);
3331
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003332 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003333
3334 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3335 readl(iommu->reg + DMAR_FECTL_REG);
3336 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3337 readl(iommu->reg + DMAR_FEDATA_REG);
3338 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3339 readl(iommu->reg + DMAR_FEADDR_REG);
3340 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3341 readl(iommu->reg + DMAR_FEUADDR_REG);
3342
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003343 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003344 }
3345 return 0;
3346
3347nomem:
3348 for_each_active_iommu(iommu, drhd)
3349 kfree(iommu->iommu_state);
3350
3351 return -ENOMEM;
3352}
3353
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003354static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003355{
3356 struct dmar_drhd_unit *drhd;
3357 struct intel_iommu *iommu = NULL;
3358 unsigned long flag;
3359
3360 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003361 if (force_on)
3362 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3363 else
3364 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003365 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003366 }
3367
3368 for_each_active_iommu(iommu, drhd) {
3369
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003370 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003371
3372 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3373 iommu->reg + DMAR_FECTL_REG);
3374 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3375 iommu->reg + DMAR_FEDATA_REG);
3376 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3377 iommu->reg + DMAR_FEADDR_REG);
3378 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3379 iommu->reg + DMAR_FEUADDR_REG);
3380
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003381 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003382 }
3383
3384 for_each_active_iommu(iommu, drhd)
3385 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003386}
3387
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003388static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003389 .resume = iommu_resume,
3390 .suspend = iommu_suspend,
3391};
3392
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003393static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003394{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003395 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003396}
3397
3398#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003399static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003400#endif /* CONFIG_PM */
3401
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003402LIST_HEAD(dmar_rmrr_units);
3403
3404static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3405{
3406 list_add(&rmrr->list, &dmar_rmrr_units);
3407}
3408
3409
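/*
 * Parse one RMRR (Reserved Memory Region Reporting) structure from the
 * DMAR table and add it to dmar_rmrr_units.
 */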
3410int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3411{
3412 struct acpi_dmar_reserved_memory *rmrr;
3413 struct dmar_rmrr_unit *rmrru;
3414
3415 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3416 if (!rmrru)
3417 return -ENOMEM;
3418
3419 rmrru->hdr = header;
3420 rmrr = (struct acpi_dmar_reserved_memory *)header;
3421 rmrru->base_address = rmrr->base_address;
3422 rmrru->end_address = rmrr->end_address;
3423
3424 dmar_register_rmrr_unit(rmrru);
3425 return 0;
3426}
3427
3428static int __init
3429rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3430{
3431 struct acpi_dmar_reserved_memory *rmrr;
3432 int ret;
3433
3434 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3435 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3436 ((void *)rmrr) + rmrr->header.length,
3437 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3438
3439 if (ret || (rmrru->devices_cnt == 0)) {
3440 list_del(&rmrru->list);
3441 kfree(rmrru);
3442 }
3443 return ret;
3444}
3445
3446static LIST_HEAD(dmar_atsr_units);
3447
3448int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3449{
3450 struct acpi_dmar_atsr *atsr;
3451 struct dmar_atsr_unit *atsru;
3452
3453 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3454 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3455 if (!atsru)
3456 return -ENOMEM;
3457
3458 atsru->hdr = hdr;
3459 atsru->include_all = atsr->flags & 0x1;
3460
3461 list_add(&atsru->list, &dmar_atsr_units);
3462
3463 return 0;
3464}
3465
3466static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3467{
3468 int rc;
3469 struct acpi_dmar_atsr *atsr;
3470
3471 if (atsru->include_all)
3472 return 0;
3473
3474 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3475 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3476 (void *)atsr + atsr->header.length,
3477 &atsru->devices_cnt, &atsru->devices,
3478 atsr->segment);
3479 if (rc || !atsru->devices_cnt) {
3480 list_del(&atsru->list);
3481 kfree(atsru);
3482 }
3483
3484 return rc;
3485}
3486
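/*
 * Return 1 if ATS is allowed for @dev: either its root port is listed in
 * an ATSR (Address Translation Services Reporting) structure for the
 * device's segment, or an include-all ATSR matches that segment.
 */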
3487int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3488{
3489 int i;
3490 struct pci_bus *bus;
3491 struct acpi_dmar_atsr *atsr;
3492 struct dmar_atsr_unit *atsru;
3493
3494 dev = pci_physfn(dev);
3495
3496 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3497 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3498 if (atsr->segment == pci_domain_nr(dev->bus))
3499 goto found;
3500 }
3501
3502 return 0;
3503
3504found:
3505 for (bus = dev->bus; bus; bus = bus->parent) {
3506 struct pci_dev *bridge = bus->self;
3507
3508 if (!bridge || !pci_is_pcie(bridge) ||
3509 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3510 return 0;
3511
3512 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3513 for (i = 0; i < atsru->devices_cnt; i++)
3514 if (atsru->devices[i] == bridge)
3515 return 1;
3516 break;
3517 }
3518 }
3519
3520 if (atsru->include_all)
3521 return 1;
3522
3523 return 0;
3524}
3525
3526int dmar_parse_rmrr_atsr_dev(void)
3527{
3528 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3529 struct dmar_atsr_unit *atsr, *atsr_n;
3530 int ret = 0;
3531
3532 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3533 ret = rmrr_parse_dev(rmrr);
3534 if (ret)
3535 return ret;
3536 }
3537
3538 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3539 ret = atsr_parse_dev(atsr);
3540 if (ret)
3541 return ret;
3542 }
3543
3544 return ret;
3545}
3546
Fenghua Yu99dcade2009-11-11 07:23:06 -08003547/*
3548 * Here we only respond to a device being unbound from its driver.
3549 *
3550 * A newly added device is not attached to its DMAR domain here yet; that
3551 * happens when the device is first mapped to an iova.
3552 */
3553static int device_notifier(struct notifier_block *nb,
3554 unsigned long action, void *data)
3555{
3556 struct device *dev = data;
3557 struct pci_dev *pdev = to_pci_dev(dev);
3558 struct dmar_domain *domain;
3559
David Woodhouse44cd6132009-12-02 10:18:30 +00003560 if (iommu_no_mapping(dev))
3561 return 0;
3562
Fenghua Yu99dcade2009-11-11 07:23:06 -08003563 domain = find_domain(pdev);
3564 if (!domain)
3565 return 0;
3566
Alex Williamsona97590e2011-03-04 14:52:16 -07003567 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003568 domain_remove_one_dev_info(domain, pdev);
3569
Alex Williamsona97590e2011-03-04 14:52:16 -07003570 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3571 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3572 list_empty(&domain->devices))
3573 domain_exit(domain);
3574 }
3575
Fenghua Yu99dcade2009-11-11 07:23:06 -08003576 return 0;
3577}
3578
3579static struct notifier_block device_nb = {
3580 .notifier_call = device_notifier,
3581};
3582
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003583int __init intel_iommu_init(void)
3584{
3585 int ret = 0;
3586
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003587 /* VT-d is required for a TXT/tboot launch, so enforce that */
3588 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003589
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003590 if (dmar_table_init()) {
3591 if (force_on)
3592 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003593 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003594 }
3595
Suresh Siddhac2c72862011-08-23 17:05:19 -07003596 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003597 if (force_on)
3598 panic("tboot: Failed to initialize DMAR device scope\n");
3599 return -ENODEV;
3600 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003601
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003602 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003603 return -ENODEV;
3604
Joseph Cihula51a63e62011-03-21 11:04:24 -07003605 if (iommu_init_mempool()) {
3606 if (force_on)
3607 panic("tboot: Failed to initialize iommu memory\n");
3608 return -ENODEV;
3609 }
3610
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003611 if (list_empty(&dmar_rmrr_units))
3612 printk(KERN_INFO "DMAR: No RMRR found\n");
3613
3614 if (list_empty(&dmar_atsr_units))
3615 printk(KERN_INFO "DMAR: No ATSR found\n");
3616
Joseph Cihula51a63e62011-03-21 11:04:24 -07003617 if (dmar_init_reserved_ranges()) {
3618 if (force_on)
3619 panic("tboot: Failed to reserve iommu ranges\n");
3620 return -ENODEV;
3621 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003622
3623 init_no_remapping_devices();
3624
Joseph Cihulab7792602011-05-03 00:08:37 -07003625 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003626 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003627 if (force_on)
3628 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003629 printk(KERN_ERR "IOMMU: dmar init failed\n");
3630 put_iova_domain(&reserved_iova_list);
3631 iommu_exit_mempool();
3632 return ret;
3633 }
3634 printk(KERN_INFO
3635 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3636
mark gross5e0d2a62008-03-04 15:22:08 -08003637 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003638#ifdef CONFIG_SWIOTLB
3639 swiotlb = 0;
3640#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003641 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003642
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003643 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003644
3645 register_iommu(&intel_iommu_ops);
3646
Fenghua Yu99dcade2009-11-11 07:23:06 -08003647 bus_register_notifier(&pci_bus_type, &device_nb);
3648
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003649 return 0;
3650}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003651
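/*
 * Tear down the context entries of every bridge on the path between
 * @pdev and its IOMMU, i.e. the devices @pdev's DMA transactions
 * depend on.
 */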
Han, Weidong3199aa62009-02-26 17:31:12 +08003652static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3653 struct pci_dev *pdev)
3654{
3655 struct pci_dev *tmp, *parent;
3656
3657 if (!iommu || !pdev)
3658 return;
3659
3660 /* dependent device detach */
3661 tmp = pci_find_upstream_pcie_bridge(pdev);
3662 /* Secondary interface's bus number and devfn 0 */
3663 if (tmp) {
3664 parent = pdev->bus->self;
3665 while (parent != tmp) {
3666 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf92009-04-04 01:45:37 +01003667 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003668 parent = parent->bus->self;
3669 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003670 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003671 iommu_detach_dev(iommu,
3672 tmp->subordinate->number, 0);
3673 else /* this is a legacy PCI bridge */
David Woodhouse276dbf92009-04-04 01:45:37 +01003674 iommu_detach_dev(iommu, tmp->bus->number,
3675 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003676 }
3677}
3678
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003679static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003680 struct pci_dev *pdev)
3681{
3682 struct device_domain_info *info;
3683 struct intel_iommu *iommu;
3684 unsigned long flags;
3685 int found = 0;
3686 struct list_head *entry, *tmp;
3687
David Woodhouse276dbf92009-04-04 01:45:37 +01003688 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3689 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003690 if (!iommu)
3691 return;
3692
3693 spin_lock_irqsave(&device_domain_lock, flags);
3694 list_for_each_safe(entry, tmp, &domain->devices) {
3695 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003696 if (info->segment == pci_domain_nr(pdev->bus) &&
3697 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003698 info->devfn == pdev->devfn) {
3699 list_del(&info->link);
3700 list_del(&info->global);
3701 if (info->dev)
3702 info->dev->dev.archdata.iommu = NULL;
3703 spin_unlock_irqrestore(&device_domain_lock, flags);
3704
Yu Zhao93a23a72009-05-18 13:51:37 +08003705 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003706 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003707 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003708 free_devinfo_mem(info);
3709
3710 spin_lock_irqsave(&device_domain_lock, flags);
3711
3712 if (found)
3713 break;
3714 else
3715 continue;
3716 }
3717
3718		/* if there are no other devices under the same iommu
3719		 * owned by this domain, clear this iommu in iommu_bmp,
3720		 * and update the iommu count and coherency
3721 */
David Woodhouse276dbf92009-04-04 01:45:37 +01003722 if (iommu == device_to_iommu(info->segment, info->bus,
3723 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003724 found = 1;
3725 }
3726
Roland Dreier3e7abe22011-07-20 06:22:21 -07003727 spin_unlock_irqrestore(&device_domain_lock, flags);
3728
Weidong Hanc7151a82008-12-08 22:51:37 +08003729 if (found == 0) {
3730 unsigned long tmp_flags;
3731 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3732 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3733 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003734 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003735 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003736
Alex Williamson9b4554b2011-05-24 12:19:04 -04003737 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3738 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3739 spin_lock_irqsave(&iommu->lock, tmp_flags);
3740 clear_bit(domain->id, iommu->domain_ids);
3741 iommu->domains[domain->id] = NULL;
3742 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3743 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003744 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003745}
3746
3747static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3748{
3749 struct device_domain_info *info;
3750 struct intel_iommu *iommu;
3751 unsigned long flags1, flags2;
3752
3753 spin_lock_irqsave(&device_domain_lock, flags1);
3754 while (!list_empty(&domain->devices)) {
3755 info = list_entry(domain->devices.next,
3756 struct device_domain_info, link);
3757 list_del(&info->link);
3758 list_del(&info->global);
3759 if (info->dev)
3760 info->dev->dev.archdata.iommu = NULL;
3761
3762 spin_unlock_irqrestore(&device_domain_lock, flags1);
3763
Yu Zhao93a23a72009-05-18 13:51:37 +08003764 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf92009-04-04 01:45:37 +01003765 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003766 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003767 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003768
3769 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003770 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003771 */
3772 spin_lock_irqsave(&domain->iommu_lock, flags2);
3773 if (test_and_clear_bit(iommu->seq_id,
3774 &domain->iommu_bmp)) {
3775 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003776 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003777 }
3778 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3779
3780 free_devinfo_mem(info);
3781 spin_lock_irqsave(&device_domain_lock, flags1);
3782 }
3783 spin_unlock_irqrestore(&device_domain_lock, flags1);
3784}
3785
Weidong Han5e98c4b2008-12-08 23:03:27 +08003786/* domain ids for virtual machine domains; they are never set in context entries */
3787static unsigned long vm_domid;
3788
3789static struct dmar_domain *iommu_alloc_vm_domain(void)
3790{
3791 struct dmar_domain *domain;
3792
3793 domain = alloc_domain_mem();
3794 if (!domain)
3795 return NULL;
3796
3797 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003798 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003799 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3800 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3801
3802 return domain;
3803}
3804
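/*
 * Set up a VM domain that is not tied to any IOMMU unit: reserve the
 * special iova ranges, derive the AGAW from @guest_width and allocate
 * the top-level page directory.
 */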
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003805static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003806{
3807 int adjust_width;
3808
3809 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003810 spin_lock_init(&domain->iommu_lock);
3811
3812 domain_reserve_special_ranges(domain);
3813
3814 /* calculate AGAW */
3815 domain->gaw = guest_width;
3816 adjust_width = guestwidth_to_adjustwidth(guest_width);
3817 domain->agaw = width_to_agaw(adjust_width);
3818
3819 INIT_LIST_HEAD(&domain->devices);
3820
3821 domain->iommu_count = 0;
3822 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003823 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003824 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003825 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003826 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003827
3828 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003829 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003830 if (!domain->pgd)
3831 return -ENOMEM;
3832 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3833 return 0;
3834}
3835
3836static void iommu_free_vm_domain(struct dmar_domain *domain)
3837{
3838 unsigned long flags;
3839 struct dmar_drhd_unit *drhd;
3840 struct intel_iommu *iommu;
3841 unsigned long i;
3842 unsigned long ndomains;
3843
3844 for_each_drhd_unit(drhd) {
3845 if (drhd->ignored)
3846 continue;
3847 iommu = drhd->iommu;
3848
3849 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003850 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003851 if (iommu->domains[i] == domain) {
3852 spin_lock_irqsave(&iommu->lock, flags);
3853 clear_bit(i, iommu->domain_ids);
3854 iommu->domains[i] = NULL;
3855 spin_unlock_irqrestore(&iommu->lock, flags);
3856 break;
3857 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003858 }
3859 }
3860}
3861
3862static void vm_domain_exit(struct dmar_domain *domain)
3863{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003864	/* Domain 0 is reserved, so don't process it */
3865 if (!domain)
3866 return;
3867
3868 vm_domain_remove_all_dev_info(domain);
3869 /* destroy iovas */
3870 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003871
3872 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003873 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003874
3875 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003876 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003877
3878 iommu_free_vm_domain(domain);
3879 free_domain_mem(domain);
3880}
3881
Joerg Roedel5d450802008-12-03 14:52:32 +01003882static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003883{
Joerg Roedel5d450802008-12-03 14:52:32 +01003884 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003885
Joerg Roedel5d450802008-12-03 14:52:32 +01003886 dmar_domain = iommu_alloc_vm_domain();
3887 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003888 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003889 "intel_iommu_domain_init: dmar_domain == NULL\n");
3890 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003891 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003892 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003893 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003894 "intel_iommu_domain_init() failed\n");
3895 vm_domain_exit(dmar_domain);
3896 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003897 }
Allen Kay8140a952011-10-14 12:32:17 -07003898 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003899 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003900
Joerg Roedel5d450802008-12-03 14:52:32 +01003901 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003902}
Kay, Allen M38717942008-09-09 18:37:29 +03003903
Joerg Roedel5d450802008-12-03 14:52:32 +01003904static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003905{
Joerg Roedel5d450802008-12-03 14:52:32 +01003906 struct dmar_domain *dmar_domain = domain->priv;
3907
3908 domain->priv = NULL;
3909 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003910}
Kay, Allen M38717942008-09-09 18:37:29 +03003911
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003912static int intel_iommu_attach_device(struct iommu_domain *domain,
3913 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003914{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003915 struct dmar_domain *dmar_domain = domain->priv;
3916 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003917 struct intel_iommu *iommu;
3918 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003919
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003920 /* normally pdev is not mapped */
3921 if (unlikely(domain_context_mapped(pdev))) {
3922 struct dmar_domain *old_domain;
3923
3924 old_domain = find_domain(pdev);
3925 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003926 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3927 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3928 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003929 else
3930 domain_remove_dev_info(old_domain);
3931 }
3932 }
3933
David Woodhouse276dbf92009-04-04 01:45:37 +01003934 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3935 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003936 if (!iommu)
3937 return -ENODEV;
3938
3939 /* check if this iommu agaw is sufficient for max mapped address */
3940 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003941 if (addr_width > cap_mgaw(iommu->cap))
3942 addr_width = cap_mgaw(iommu->cap);
3943
3944 if (dmar_domain->max_addr > (1LL << addr_width)) {
3945 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003946 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003947 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003948 return -EFAULT;
3949 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003950 dmar_domain->gaw = addr_width;
3951
3952 /*
3953 * Knock out extra levels of page tables if necessary
3954 */
3955 while (iommu->agaw < dmar_domain->agaw) {
3956 struct dma_pte *pte;
3957
3958 pte = dmar_domain->pgd;
3959 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08003960 dmar_domain->pgd = (struct dma_pte *)
3961 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01003962 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01003963 }
3964 dmar_domain->agaw--;
3965 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003966
David Woodhouse5fe60f42009-08-09 10:53:41 +01003967 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003968}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003969
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003970static void intel_iommu_detach_device(struct iommu_domain *domain,
3971 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003972{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003973 struct dmar_domain *dmar_domain = domain->priv;
3974 struct pci_dev *pdev = to_pci_dev(dev);
3975
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003976 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003977}
Kay, Allen M38717942008-09-09 18:37:29 +03003978
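/*
 * iommu_ops map callback: map 2^gfp_order pages at @iova to @hpa, after
 * checking that the new end address still fits within the domain's AGAW.
 */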
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003979static int intel_iommu_map(struct iommu_domain *domain,
3980 unsigned long iova, phys_addr_t hpa,
3981 int gfp_order, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003982{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003983 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003984 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003985 int prot = 0;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003986 size_t size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003987 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003988
Joerg Roedeldde57a22008-12-03 15:04:09 +01003989 if (iommu_prot & IOMMU_READ)
3990 prot |= DMA_PTE_READ;
3991 if (iommu_prot & IOMMU_WRITE)
3992 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08003993 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3994 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003995
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003996 size = PAGE_SIZE << gfp_order;
David Woodhouse163cc522009-06-28 00:51:17 +01003997 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003998 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003999 u64 end;
4000
4001 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004002 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004003 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004004 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004005 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004006 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004007 return -EFAULT;
4008 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004009 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004010 }
David Woodhousead051222009-06-28 14:22:28 +01004011 /* Round up size to next multiple of PAGE_SIZE, if it and
4012 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004013 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004014 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4015 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004016 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004017}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004018
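/*
 * iommu_ops unmap callback: clear the PTEs covering the range and return
 * the order of the region actually unmapped, as reported by
 * dma_pte_clear_range().
 */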
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004019static int intel_iommu_unmap(struct iommu_domain *domain,
4020 unsigned long iova, int gfp_order)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004021{
Joerg Roedeldde57a22008-12-03 15:04:09 +01004022 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004023 size_t size = PAGE_SIZE << gfp_order;
Allen Kay292827c2011-10-14 12:31:54 -07004024 int order;
Sheng Yang4b99d352009-07-08 11:52:52 +01004025
Allen Kay292827c2011-10-14 12:31:54 -07004026 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
David Woodhouse163cc522009-06-28 00:51:17 +01004027 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004028
David Woodhouse163cc522009-06-28 00:51:17 +01004029 if (dmar_domain->max_addr == iova + size)
4030 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004031
Allen Kay292827c2011-10-14 12:31:54 -07004032 return order;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004033}
Kay, Allen M38717942008-09-09 18:37:29 +03004034
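/*
 * iommu_ops.iova_to_phys callback: walk the page table for @iova and
 * return the physical address it maps to, or 0 if no PTE is present.
 */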
Joerg Roedeld14d6572008-12-03 15:06:57 +01004035static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4036 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03004037{
Joerg Roedeld14d6572008-12-03 15:06:57 +01004038 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03004039 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004040 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03004041
Youquan Song6dd9a7c2011-05-25 19:13:49 +01004042 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
Kay, Allen M38717942008-09-09 18:37:29 +03004043 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004044 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03004045
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004046 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03004047}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004048
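/*
 * iommu_ops.domain_has_cap callback: report per-domain snooping and
 * global interrupt-remapping support to the generic IOMMU layer.
 */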
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004049static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4050 unsigned long cap)
4051{
4052 struct dmar_domain *dmar_domain = domain->priv;
4053
4054 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4055 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04004056 if (cap == IOMMU_CAP_INTR_REMAP)
4057 return intr_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004058
4059 return 0;
4060}
4061
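/*
 * The callback table handed to the generic IOMMU layer; every
 * iommu_domain_*()/iommu_map()-style call on a VT-d domain is routed
 * through these entries.
 */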
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004062static struct iommu_ops intel_iommu_ops = {
4063 .domain_init = intel_iommu_domain_init,
4064 .domain_destroy = intel_iommu_domain_destroy,
4065 .attach_dev = intel_iommu_attach_device,
4066 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01004067 .map = intel_iommu_map,
4068 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004069 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08004070 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01004071};
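/*
 * Illustrative sketch only (not part of this driver): a consumer such
 * as KVM device assignment exercises the callbacks above through the
 * generic IOMMU API, roughly:
 *
 *	domain = iommu_domain_alloc();		// -> intel_iommu_domain_init()
 *	iommu_attach_device(domain, dev);	// -> intel_iommu_attach_device()
 *	iommu_map(domain, iova, hpa, 0,		// -> intel_iommu_map(),
 *		  IOMMU_READ | IOMMU_WRITE);	//    order 0 == one page
 *
 * Exact prototypes vary between kernel versions; treat this as a
 * hedged example, not a stable API reference.
 */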
David Woodhouse9af88142009-02-13 23:18:03 +00004072
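/*
 * PCI header fixup, run early during enumeration via the
 * DECLARE_PCI_FIXUP_HEADER() below, before the IOMMU code consults
 * rwbf_quirk or dmar_map_gfx.
 */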
4073static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
4074{
4075 /*
 4076	 * Mobile 4 Series Chipset neglects to set the RWBF capability
 4077	 * bit, but needs it.
4078 */
4079 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4080 rwbf_quirk = 1;
David Woodhouse2d9e6672010-06-15 10:57:57 +01004081
4082 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
4083 if (dev->revision == 0x07) {
4084 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4085 dmar_map_gfx = 0;
4086 }
David Woodhouse9af88142009-02-13 23:18:03 +00004087}
4088
4089DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07004090
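/*
 * GGC is the GMCH Graphics Control register in PCI config space.
 * Bits 11:8 encode how much memory the BIOS carved out for the GTT;
 * the _VT-suffixed values below are the variants with bit 11
 * (GGC_MEMORY_VT_ENABLED) set.
 */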
Adam Jacksoneecfd572010-08-25 21:17:34 +01004091#define GGC 0x52
4092#define GGC_MEMORY_SIZE_MASK (0xf << 8)
4093#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4094#define GGC_MEMORY_SIZE_1M (0x1 << 8)
4095#define GGC_MEMORY_SIZE_2M (0x3 << 8)
4096#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4097#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4098#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4099#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4100
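/*
 * Calpella-era integrated graphics (see the fixups below): if the BIOS
 * did not allocate a VT-enabled GTT, the IGD has no shadow GTT and DMA
 * remapping for graphics cannot work, so it is disabled.  If it did,
 * batched IOTLB flushing is still unsafe here, so strict flushing is
 * forced instead.
 */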
David Woodhouse9eecabc2010-09-21 22:28:23 +01004101static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4102{
4103 unsigned short ggc;
4104
Adam Jacksoneecfd572010-08-25 21:17:34 +01004105 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01004106 return;
4107
Adam Jacksoneecfd572010-08-25 21:17:34 +01004108 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01004109 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4110 dmar_map_gfx = 0;
David Woodhouse6fbcfb32011-09-25 19:11:14 -07004111 } else if (dmar_map_gfx) {
4112 /* we have to ensure the gfx device is idle before we flush */
4113 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4114 intel_iommu_strict = 1;
4115 }
David Woodhouse9eecabc2010-09-21 22:28:23 +01004116}
4117DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4118DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4119DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4120DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4121
David Woodhousee0fc7e02009-09-30 09:12:17 -07004122/* On Tylersburg chipsets, some BIOSes have been known to enable the
4123 ISOCH DMAR unit for the Azalia sound device, but not give it any
4124 TLB entries, which causes it to deadlock. Check for that. We do
4125 this in a function called from init_dmars(), instead of in a PCI
4126 quirk, because we don't want to print the obnoxious "BIOS broken"
4127 message if VT-d is actually disabled.
4128*/
4129static void __init check_tylersburg_isoch(void)
4130{
4131 struct pci_dev *pdev;
4132 uint32_t vtisochctrl;
4133
4134 /* If there's no Azalia in the system anyway, forget it. */
4135 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4136 if (!pdev)
4137 return;
4138 pci_dev_put(pdev);
4139
4140 /* System Management Registers. Might be hidden, in which case
4141 we can't do the sanity check. But that's OK, because the
4142 known-broken BIOSes _don't_ actually hide it, so far. */
4143 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4144 if (!pdev)
4145 return;
4146
4147 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4148 pci_dev_put(pdev);
4149 return;
4150 }
4151
4152 pci_dev_put(pdev);
4153
4154 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4155 if (vtisochctrl & 1)
4156 return;
4157
4158 /* Drop all bits other than the number of TLB entries */
4159 vtisochctrl &= 0x1c;
4160
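	/* The masked field is the raw entry count, so 0x10 means 16 entries. */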
4161 /* If we have the recommended number of TLB entries (16), fine. */
4162 if (vtisochctrl == 0x10)
4163 return;
4164
 4165	/* Zero TLB entries? Warn loudly and fall back to identity-mapping Azalia. */
4166 if (!vtisochctrl) {
4167 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4168 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4169 dmi_get_system_info(DMI_BIOS_VENDOR),
4170 dmi_get_system_info(DMI_BIOS_VERSION),
4171 dmi_get_system_info(DMI_PRODUCT_VERSION));
4172 iommu_identity_mapping |= IDENTMAP_AZALIA;
4173 return;
4174 }
4175
 4176	printk(KERN_WARNING "DMAR: Recommended number of TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4177 vtisochctrl);
4178}