/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
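
/*
 * Illustrative example (derived from the helpers above, not a new
 * guarantee): with 4KiB MM pages, PAGE_SHIFT == VTD_PAGE_SHIFT == 12 and
 * all four conversions are the identity. If MM pages were 16KiB, each
 * mm_pfn would cover four dma_pfns, e.g. mm_to_dma_pfn(2) == 8.
 */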

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;
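
/*
 * Note on the deferred-unmap design above: unmapped IOVAs are queued in
 * deferred_flush and released in batches, either when unmap_timer fires
 * or when a table reaches HIGH_WATER_MARK entries. This trades IOTLB
 * flush frequency for a short window in which stale translations remain
 * visible; booting with intel_iommu=strict (see intel_iommu_setup below)
 * disables the batching.
 */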

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}
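
/*
 * Worked example: width_to_agaw() maps a 48-bit address width to AGAW 2
 * (a 4-level table, since agaw_to_width() below is 30 + 9*agaw), so with
 * max_gaw == 48 the loop starts at bit 2 of the SAGAW field and walks
 * down until it finds a table depth the hardware reports as supported.
 */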

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu for a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
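
/*
 * Example of the level arithmetic above, assuming 4KiB VT-d pages:
 * level 1 indexes pfn bits 0-8, level 2 bits 9-17, and so on
 * (level_to_offset_bits(2) == 9). level_size(2) == 512 pfns == 2MiB,
 * and align_to_level(1, 2) rounds pfn 1 up to 512, the next boundary
 * covered by a whole level-2 entry.
 */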

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table controls read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}
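
/*
 * pfn_to_dma_pte() walks from the top of the page table down to level 1,
 * allocating any missing intermediate tables on the way, and returns the
 * leaf PTE for @pfn. With agaw 2 (48-bit, 4-level), for instance, the
 * walk consumes pfn bits 27-35, 18-26 and 9-17 before the level-1 entry
 * selected by bits 0-8 is returned. Intermediate entries are set both
 * readable and writable; only the returned leaf controls permissions.
 */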

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		while (start_pfn <= last_pfn &&
		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		}
		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);
	}
}
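
/*
 * The two-level loop above exists to batch cache flushes: the inner loop
 * clears consecutive PTEs for as long as they share one 4KiB PTE page,
 * so each run costs a single domain_flush_cache() call. When no leaf
 * table is present, align_to_level(start_pfn + 1, 2) skips straight to
 * the region covered by the next level-2 entry.
 */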

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp + level_size(level) - 1 <= last_pfn) {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			while (tmp + level_size(level) - 1 <= last_pfn &&
			       (unsigned long)pte >> VTD_PAGE_SHIFT ==
			       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
				free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				pte++;
				tmp += level_size(level);
			}
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}
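
/*
 * Summary of the conditions checked above: a device qualifies for a
 * device IOTLB only when the IOMMU reports dev-IOTLB support in its
 * extended capabilities, queued invalidation is initialized, the device
 * exposes the PCI Express ATS capability, and an ATSR unit in the DMAR
 * tables covers it. Failing any of these, context entries fall back to
 * plain multi-level translation (see domain_context_mapping_one below).
 */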

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page size to be 2 ^ x, and the base address to be
	 * naturally aligned to that size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
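
/*
 * Example: flushing 3 pages gives mask = ilog2(__roundup_pow_of_two(3))
 * == 2, i.e. a naturally aligned 4-page invalidation, since page-selective
 * invalidation only accepts power-of-two ranges. Requests whose mask
 * exceeds cap_max_amask_val() degrade to a domain-selective flush.
 */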

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
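
/*
 * Examples: gaw 39 gives r == 0 and is kept as-is (39 = 12 + 3*9, a
 * valid 3-level width), while gaw 40 gives r == 1 and is rounded up to
 * the next supported width, 48. The result always satisfies
 * (agaw - 12) % 9 == 0, matching the 9-bit stride per table level.
 */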

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/*
		 * Skip top levels of page tables for an iommu that has
		 * a smaller agaw than the domain's default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
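	/*
	 * CONTEXT_TT_DEV_IOTLB marks the context so that an ATS-capable
	 * endpoint may request translations and cache them in its own
	 * device IOTLB; iommu_support_dev_iotlb() reports whether this
	 * device/iommu pair can do that.  Pass-through contexts skip the
	 * lookup since their requests bypass translation entirely.
	 */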
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
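	/*
	 * DMA requests from a device behind a PCIe-to-PCI bridge are
	 * tagged with the bridge's source-id rather than the device's
	 * own, so every bridge on the upstream path must carry the same
	 * context mapping; the walk below mirrors how the hardware
	 * attributes such requests.
	 */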
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}
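	/*
	 * sg_res counts the pages still to be mapped from the current
	 * scatterlist entry; reaching zero advances to the next entry.
	 * For the non-sg case it is primed to nr_pages + 1 so it never
	 * hits zero inside the loop, and pteval simply marches through
	 * the contiguous physical range.  PTE writes are flushed in
	 * batches, once per page of the page table (see the
	 * domain_flush_cache() call at the bottom of the loop).
	 */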

	while (nr_pages--) {
		if (!sg_res) {
			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
		}
		if (!pte) {
			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
			if (!pte)
				return -ENOMEM;
		}
		/* We don't need a lock here; nobody else touches
		 * this iova range.
		 */
		if (unlikely(dma_pte_addr(pte))) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx)\n",
			       iov_pfn, pte->val);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}
		pte->val = pteval;
		pte++;
		if (!nr_pages ||
		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}
		iov_pfn++;
		pteval += VTD_PAGE_SIZE;
		sg_res--;
		if (!sg_res)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
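/*
 * The wrappers above are the usual entry points to __domain_mapping().
 * A hedged usage sketch (iova, phys and nr_pages are illustrative
 * names, not fields of any structure in this file):
 *
 *	domain_pfn_mapping(domain, iova >> VTD_PAGE_SHIFT,
 *			   phys >> VTD_PAGE_SHIFT, nr_pages,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * maps nr_pages contiguous frames read/write at the given IOVA.
 */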

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
		       pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else raced us here and set the domain up first */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through. */
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

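/*
 * The static identity (si) domain is a single domain whose page tables
 * map every usable physical page 1:1.  si_domain_work_fn() above is
 * handed each active memory region by work_with_active_regions(), so
 * once si_domain_init() finishes, any device attached to si_domain can
 * DMA to all of RAM without per-buffer mappings.
 */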
static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
		       pci_name(pdev));

		ret = domain_context_mapping(si_domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret)
			return ret;
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}

int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * If pass through cannot be enabled, the iommu falls back to
	 * identity mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *	allocate root
	 *	initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed: this is only incremented in the
		 * single-threaded kernel __init code path; all other
		 * accesses are read-only.
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
				 sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through && !pass_through) {
		printk(KERN_INFO
		       "Pass Through is not supported by hardware.\n");
		iommu_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}
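	/*
	 * Of the two flush backends chosen above, queued invalidation
	 * submits descriptors through an in-memory queue rather than
	 * spinning on MMIO registers, and is also what device-IOTLB
	 * (and interrupt-remapping) invalidations ride on; the
	 * register-based path remains as the fallback for hardware
	 * without QI.
	 */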

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized by pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx, and isa, and may fall back to
	 * static identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *	for each dev attached to rmrr
		 *	do
		 *		locate drhd for dev, alloc domain for dev
		 *		allocate free domain
		 *		allocate page table entries for rmrr
		 *		if context not allocated for bus
		 *			allocate and init context
		 *			set present in root table for this bus
		 *		init context with domain, translation etc
		 *	endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOSes list non-existent devices
				 * in the DMAR table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
					       "IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *	enable fault log
	 *	global invalidate context cache
	 *	global invalidate iotlb
	 *	enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	host_addr += size + PAGE_SIZE - 1;

	return host_addr >> VTD_PAGE_SHIFT;
}
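/*
 * aligned_nrpages() above counts the VT-d pages a buffer spans once
 * its offset into the page is added back: e.g. 8 bytes starting at
 * offset 0xffc of a 4KiB page span two pages.  Mixing PAGE_MASK with
 * VTD_PAGE_SHIFT is safe only because VT-d pages are never larger
 * than MM pages.
 */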

static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}
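/*
 * The sub-4GiB first pass above avoids dual-address-cycle transactions
 * on PCI devices that advertise a 64-bit mask yet handle 32-bit
 * addresses more cheaply; booting with intel_iommu=forcedac sets
 * dmar_forcedac and skips that first attempt.
 */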

static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
				    DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed\n", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed\n",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process. */
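/*
 * The decision is dynamic: a device sitting in si_domain keeps its
 * identity mapping only while its DMA mask covers the whole map; a
 * 32-bit-limited device is migrated out to a normal remapping domain,
 * and a 64-bit-capable one (e.g. freshly detached from a VM) can be
 * migrated back in.
 */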
2440static int iommu_no_mapping(struct pci_dev *pdev)
2441{
2442 int found;
2443
2444 if (!iommu_identity_mapping)
2445 return iommu_dummy(pdev);
2446
2447 found = identity_mapping(pdev);
2448 if (found) {
2449 if (pdev->dma_mask > DMA_BIT_MASK(32))
2450 return 1;
2451 else {
2452 /*
2453 * 32 bit DMA is removed from si_domain and fall back
2454 * to non-identity mapping.
2455 */
2456 domain_remove_one_dev_info(si_domain, pdev);
2457 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2458 pci_name(pdev));
2459 return 0;
2460 }
2461 } else {
2462 /*
2463 * In case of a detached 64 bit DMA device from vm, the device
2464 * is put into si_domain for identity mapping.
2465 */
2466 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2467 int ret;
2468 ret = domain_add_dev_info(si_domain, pdev);
2469 if (!ret) {
2470 printk(KERN_INFO "64bit %s uses identity mapping\n",
2471 pci_name(pdev));
2472 return 1;
2473 }
2474 }
2475 }
2476
2477 return iommu_dummy(pdev);
2478}
2479

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(pdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * [paddr, paddr + size) may span partial pages, so map whole pages.
	 * Note: if two parts of one page are mapped separately, two guest
	 * addresses might map to the same host paddr, but this is not a
	 * big problem.
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 paddr >> VTD_PAGE_SHIFT, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
	else
		iommu_flush_write_buffer(iommu);

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
	start_paddr += paddr & ~PAGE_MASK;
	return start_paddr;

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
	       pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

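/*
 * Deferred ("lazy") unmapping: instead of invalidating the IOTLB on
 * every unmap, freed IOVAs are parked per-iommu in deferred_flush[]
 * and released in one global flush, triggered either by the
 * HIGH_WATER_MARK backlog or by a 10ms timer.  Until that flush runs
 * the device could still use the stale translation, which is the
 * usual performance/strictness trade-off (see intel_iommu_strict).
 */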
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					      iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6 of CPU time otherwise spent in the iotlb flush
		 * operation.
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);
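	/*
	 * The zone flags can be dropped because the IOMMU can remap the
	 * allocation: __intel_map_single() below allocates the IOVA
	 * under hwdev->coherent_dma_mask, so any physical page becomes
	 * reachable by the device once mapped.
	 */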

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
			      (last_pfn - start_pfn + 1));

	/* free iova */
	__free_iova(&domain->iovad, iova);
}

static int intel_nontranslate_map_sg(struct device *hddev,
				     struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

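	/*
	 * The whole scatterlist shares one contiguous IOVA range, sized
	 * by the loop above; __domain_mapping() then fills in each sg
	 * entry's dma_address/dma_length as it walks the list, so a
	 * mapping failure leaves only a single range to unwind.
	 */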
David Woodhouse875764d2009-06-28 21:20:51 +01002792 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002793 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002794 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002795 return 0;
2796 }
2797
2798 /*
2799 * Check if DMAR supports zero-length reads on write only
2800 * mappings..
2801 */
2802 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002803 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002804 prot |= DMA_PTE_READ;
2805 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2806 prot |= DMA_PTE_WRITE;
2807
David Woodhouseb536d242009-06-28 14:49:31 +01002808 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01002809
2810 ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
2811 if (unlikely(ret)) {
 2812		/* clear the mapped pages */
2813 dma_pte_clear_range(domain, start_vpfn,
2814 start_vpfn + size - 1);
2815 /* free page tables */
2816 dma_pte_free_pagetable(domain, start_vpfn,
2817 start_vpfn + size - 1);
2818 /* free iova */
2819 __free_iova(&domain->iovad, iova);
2820 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002821 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002822
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002823 /* it's a non-present to present mapping. Only flush if caching mode */
2824 if (cap_caching_mode(iommu->cap))
David Woodhouse03d6a242009-06-28 15:33:46 +01002825		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002826 else
Weidong Han8c11e792008-12-08 15:29:22 +08002827 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002828
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002829 return nelems;
2830}
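
/*
 * Note on the flush above: in caching mode (e.g. VT-d emulated by a
 * hypervisor) the IOTLB may cache not-present entries, so the freshly
 * created mapping must be invalidated with a page-selective flush;
 * real hardware never caches not-present entries, and only the write
 * buffer needs draining.
 */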
2831
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002832static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2833{
2834 return !dma_addr;
2835}
2836
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002837struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002838 .alloc_coherent = intel_alloc_coherent,
2839 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002840 .map_sg = intel_map_sg,
2841 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002842 .map_page = intel_map_page,
2843 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002844 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002845};
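
/*
 * Usage sketch: drivers reach intel_map_sg() only through the generic
 * DMA API, which dispatches via dma_ops (pointed at intel_dma_ops in
 * intel_iommu_init() below).  example_map() and its scatterlist are
 * hypothetical, for illustration only:
 */
#if 0
static int example_map(struct pci_dev *pdev, struct scatterlist *sgl,
		       int nents)
{
	int mapped;

	/* lands in intel_map_sg() when VT-d translation is active */
	mapped = dma_map_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	/* ... program the device with sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
#endif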
2846
2847static inline int iommu_domain_cache_init(void)
2848{
2849 int ret = 0;
2850
2851 iommu_domain_cache = kmem_cache_create("iommu_domain",
2852 sizeof(struct dmar_domain),
2853 0,
2854 SLAB_HWCACHE_ALIGN,
2856 NULL);
2857 if (!iommu_domain_cache) {
2858 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2859 ret = -ENOMEM;
2860 }
2861
2862 return ret;
2863}
2864
2865static inline int iommu_devinfo_cache_init(void)
2866{
2867 int ret = 0;
2868
2869 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2870 sizeof(struct device_domain_info),
2871 0,
2872 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002873 NULL);
2874 if (!iommu_devinfo_cache) {
2875 printk(KERN_ERR "Couldn't create devinfo cache\n");
2876 ret = -ENOMEM;
2877 }
2878
2879 return ret;
2880}
2881
2882static inline int iommu_iova_cache_init(void)
2883{
2884 int ret = 0;
2885
2886 iommu_iova_cache = kmem_cache_create("iommu_iova",
2887 sizeof(struct iova),
2888 0,
2889 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002890 NULL);
2891 if (!iommu_iova_cache) {
2892 printk(KERN_ERR "Couldn't create iova cache\n");
2893 ret = -ENOMEM;
2894 }
2895
2896 return ret;
2897}
2898
2899static int __init iommu_init_mempool(void)
2900{
2901 int ret;
2902 ret = iommu_iova_cache_init();
2903 if (ret)
2904 return ret;
2905
2906 ret = iommu_domain_cache_init();
2907 if (ret)
2908 goto domain_error;
2909
2910 ret = iommu_devinfo_cache_init();
2911 if (!ret)
2912 return ret;
2913
2914 kmem_cache_destroy(iommu_domain_cache);
2915domain_error:
2916 kmem_cache_destroy(iommu_iova_cache);
2917
2918 return -ENOMEM;
2919}
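
/*
 * The three caches above back alloc_domain_mem(), alloc_devinfo_mem()
 * and alloc_iova_mem() respectively; on failure the gotos unwind them
 * in reverse order of creation.
 */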
2920
2921static void __init iommu_exit_mempool(void)
2922{
2923 kmem_cache_destroy(iommu_devinfo_cache);
2924 kmem_cache_destroy(iommu_domain_cache);
2925 kmem_cache_destroy(iommu_iova_cache);
2926
2927}
2928
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002929static void __init init_no_remapping_devices(void)
2930{
2931 struct dmar_drhd_unit *drhd;
2932
2933 for_each_drhd_unit(drhd) {
2934 if (!drhd->include_all) {
2935 int i;
2936 for (i = 0; i < drhd->devices_cnt; i++)
2937 if (drhd->devices[i] != NULL)
2938 break;
2939 /* ignore DMAR unit if no pci devices exist */
2940 if (i == drhd->devices_cnt)
2941 drhd->ignored = 1;
2942 }
2943 }
2944
2945 if (dmar_map_gfx)
2946 return;
2947
2948 for_each_drhd_unit(drhd) {
2949 int i;
2950 if (drhd->ignored || drhd->include_all)
2951 continue;
2952
2953 for (i = 0; i < drhd->devices_cnt; i++)
2954 if (drhd->devices[i] &&
2955 !IS_GFX_DEVICE(drhd->devices[i]))
2956 break;
2957
2958 if (i < drhd->devices_cnt)
2959 continue;
2960
2961 /* bypass IOMMU if it is just for gfx devices */
2962 drhd->ignored = 1;
2963 for (i = 0; i < drhd->devices_cnt; i++) {
2964 if (!drhd->devices[i])
2965 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002966 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002967 }
2968 }
2969}
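
/*
 * Devices stamped with DUMMY_DEVICE_DOMAIN_INFO here are treated as
 * untranslated from then on: iommu_no_mapping() sees the marker and the
 * DMA API falls back to the nontranslate paths above.  For example,
 * booting with intel_iommu=igfx_off clears dmar_map_gfx, so a DRHD unit
 * covering only graphics devices is ignored and the GPU does
 * phys == bus address DMA.
 */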
2970
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002971#ifdef CONFIG_SUSPEND
2972static int init_iommu_hw(void)
2973{
2974 struct dmar_drhd_unit *drhd;
2975 struct intel_iommu *iommu = NULL;
2976
2977 for_each_active_iommu(iommu, drhd)
2978 if (iommu->qi)
2979 dmar_reenable_qi(iommu);
2980
2981 for_each_active_iommu(iommu, drhd) {
2982 iommu_flush_write_buffer(iommu);
2983
2984 iommu_set_root_entry(iommu);
2985
2986 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002987 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002988 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002989 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002990 iommu_disable_protect_mem_regions(iommu);
2991 iommu_enable_translation(iommu);
2992 }
2993
2994 return 0;
2995}
2996
2997static void iommu_flush_all(void)
2998{
2999 struct dmar_drhd_unit *drhd;
3000 struct intel_iommu *iommu;
3001
3002 for_each_active_iommu(iommu, drhd) {
3003 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003004 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003005 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003006 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003007 }
3008}
3009
3010static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3011{
3012 struct dmar_drhd_unit *drhd;
3013 struct intel_iommu *iommu = NULL;
3014 unsigned long flag;
3015
3016 for_each_active_iommu(iommu, drhd) {
3017 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3018 GFP_ATOMIC);
3019 if (!iommu->iommu_state)
3020 goto nomem;
3021 }
3022
3023 iommu_flush_all();
3024
3025 for_each_active_iommu(iommu, drhd) {
3026 iommu_disable_translation(iommu);
3027
3028 spin_lock_irqsave(&iommu->register_lock, flag);
3029
3030 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3031 readl(iommu->reg + DMAR_FECTL_REG);
3032 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3033 readl(iommu->reg + DMAR_FEDATA_REG);
3034 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3035 readl(iommu->reg + DMAR_FEADDR_REG);
3036 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3037 readl(iommu->reg + DMAR_FEUADDR_REG);
3038
3039 spin_unlock_irqrestore(&iommu->register_lock, flag);
3040 }
3041 return 0;
3042
3043nomem:
3044 for_each_active_iommu(iommu, drhd)
3045 kfree(iommu->iommu_state);
3046
3047 return -ENOMEM;
3048}
3049
3050static int iommu_resume(struct sys_device *dev)
3051{
3052 struct dmar_drhd_unit *drhd;
3053 struct intel_iommu *iommu = NULL;
3054 unsigned long flag;
3055
3056 if (init_iommu_hw()) {
 3057		WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
3058 return -EIO;
3059 }
3060
3061 for_each_active_iommu(iommu, drhd) {
3063 spin_lock_irqsave(&iommu->register_lock, flag);
3064
3065 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3066 iommu->reg + DMAR_FECTL_REG);
3067 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3068 iommu->reg + DMAR_FEDATA_REG);
3069 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3070 iommu->reg + DMAR_FEADDR_REG);
3071 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3072 iommu->reg + DMAR_FEUADDR_REG);
3073
3074 spin_unlock_irqrestore(&iommu->register_lock, flag);
3075 }
3076
3077 for_each_active_iommu(iommu, drhd)
3078 kfree(iommu->iommu_state);
3079
3080 return 0;
3081}
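
/*
 * Suspend/resume in brief: iommu_suspend() flushes and disables every
 * active IOMMU and saves its four fault-event registers (FECTL, FEDATA,
 * FEADDR, FEUADDR); iommu_resume() has init_iommu_hw() restore the root
 * entry, flush, and re-enable translation before writing those saved
 * registers back.
 */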
3082
3083static struct sysdev_class iommu_sysclass = {
3084 .name = "iommu",
3085 .resume = iommu_resume,
3086 .suspend = iommu_suspend,
3087};
3088
3089static struct sys_device device_iommu = {
3090 .cls = &iommu_sysclass,
3091};
3092
3093static int __init init_iommu_sysfs(void)
3094{
3095 int error;
3096
3097 error = sysdev_class_register(&iommu_sysclass);
3098 if (error)
3099 return error;
3100
3101 error = sysdev_register(&device_iommu);
3102 if (error)
3103 sysdev_class_unregister(&iommu_sysclass);
3104
3105 return error;
3106}
3107
3108#else
3109static int __init init_iommu_sysfs(void)
3110{
3111 return 0;
3112}
 3113#endif /* CONFIG_SUSPEND */
3114
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003115int __init intel_iommu_init(void)
3116{
3117 int ret = 0;
3118
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003119 if (dmar_table_init())
3120 return -ENODEV;
3121
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003122 if (dmar_dev_scope_init())
3123 return -ENODEV;
3124
Suresh Siddha2ae21012008-07-10 11:16:43 -07003125 /*
 3126	 * Check the need for DMA-remapping initialization now;
 3127	 * the table and device-scope setup above is also used by interrupt remapping.
3128 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003129 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003130 return -ENODEV;
3131
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003132 iommu_init_mempool();
3133 dmar_init_reserved_ranges();
3134
3135 init_no_remapping_devices();
3136
3137 ret = init_dmars();
3138 if (ret) {
3139 printk(KERN_ERR "IOMMU: dmar init failed\n");
3140 put_iova_domain(&reserved_iova_list);
3141 iommu_exit_mempool();
3142 return ret;
3143 }
3144 printk(KERN_INFO
3145 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3146
mark gross5e0d2a62008-03-04 15:22:08 -08003147 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003148 force_iommu = 1;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003149
3150 if (!iommu_pass_through) {
3151 printk(KERN_INFO
3152 "Multi-level page-table translation for DMAR.\n");
3153 dma_ops = &intel_dma_ops;
3154 } else
3155 printk(KERN_INFO
3156 "DMAR: Pass through translation for DMAR.\n");
3157
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003158 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003159
3160 register_iommu(&intel_iommu_ops);
3161
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003162 return 0;
3163}
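
/*
 * Boot-time flow of intel_iommu_init(), in order:
 *
 *	dmar_table_init()	parse the ACPI DMAR table
 *	dmar_dev_scope_init()	bind DRHD/RMRR device scopes to pci_devs
 *	init_no_remapping_devices()
 *	init_dmars()		root entries, context tables, RMRR setup
 *	dma_ops = &intel_dma_ops, unless iommu_pass_through is set
 */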
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003164
Han, Weidong3199aa62009-02-26 17:31:12 +08003165static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3166 struct pci_dev *pdev)
3167{
3168 struct pci_dev *tmp, *parent;
3169
3170 if (!iommu || !pdev)
3171 return;
3172
3173 /* dependent device detach */
3174 tmp = pci_find_upstream_pcie_bridge(pdev);
3175 /* Secondary interface's bus number and devfn 0 */
3176 if (tmp) {
3177 parent = pdev->bus->self;
3178 while (parent != tmp) {
3179 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf92009-04-04 01:45:37 +01003180 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003181 parent = parent->bus->self;
3182 }
3183 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3184 iommu_detach_dev(iommu,
3185 tmp->subordinate->number, 0);
3186 else /* this is a legacy PCI bridge */
David Woodhouse276dbf92009-04-04 01:45:37 +01003187 iommu_detach_dev(iommu, tmp->bus->number,
3188 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003189 }
3190}
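
/*
 * Worked example (hypothetical topology): a device at 02:00.0 sits
 * behind a PCIe-to-PCI bridge at 01:00.0 whose secondary bus is 2.
 * The walk above detaches each parent up to the bridge, then detaches
 * (bus 2, devfn 0): conventional-PCI transactions forwarded upstream
 * carry the bridge's secondary interface as their source-id, so that
 * context entry must be torn down too.
 */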
3191
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003192static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003193 struct pci_dev *pdev)
3194{
3195 struct device_domain_info *info;
3196 struct intel_iommu *iommu;
3197 unsigned long flags;
3198 int found = 0;
3199 struct list_head *entry, *tmp;
3200
David Woodhouse276dbf92009-04-04 01:45:37 +01003201 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3202 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003203 if (!iommu)
3204 return;
3205
3206 spin_lock_irqsave(&device_domain_lock, flags);
3207 list_for_each_safe(entry, tmp, &domain->devices) {
3208 info = list_entry(entry, struct device_domain_info, link);
David Woodhouse276dbf92009-04-04 01:45:37 +01003209 /* No need to compare PCI domain; it has to be the same */
Weidong Hanc7151a82008-12-08 22:51:37 +08003210 if (info->bus == pdev->bus->number &&
3211 info->devfn == pdev->devfn) {
3212 list_del(&info->link);
3213 list_del(&info->global);
3214 if (info->dev)
3215 info->dev->dev.archdata.iommu = NULL;
3216 spin_unlock_irqrestore(&device_domain_lock, flags);
3217
Yu Zhao93a23a72009-05-18 13:51:37 +08003218 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003219 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003220 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003221 free_devinfo_mem(info);
3222
3223 spin_lock_irqsave(&device_domain_lock, flags);
3224
3225 if (found)
3226 break;
3227 else
3228 continue;
3229 }
3230
 3231	/* if there are no other devices under the same iommu
 3232	 * owned by this domain, clear this iommu in iommu_bmp,
 3233	 * and update the iommu count and coherency
3234 */
David Woodhouse276dbf92009-04-04 01:45:37 +01003235 if (iommu == device_to_iommu(info->segment, info->bus,
3236 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003237 found = 1;
3238 }
3239
3240 if (found == 0) {
3241 unsigned long tmp_flags;
3242 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3243 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3244 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003245 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003246 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3247 }
3248
3249 spin_unlock_irqrestore(&device_domain_lock, flags);
3250}
3251
3252static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3253{
3254 struct device_domain_info *info;
3255 struct intel_iommu *iommu;
3256 unsigned long flags1, flags2;
3257
3258 spin_lock_irqsave(&device_domain_lock, flags1);
3259 while (!list_empty(&domain->devices)) {
3260 info = list_entry(domain->devices.next,
3261 struct device_domain_info, link);
3262 list_del(&info->link);
3263 list_del(&info->global);
3264 if (info->dev)
3265 info->dev->dev.archdata.iommu = NULL;
3266
3267 spin_unlock_irqrestore(&device_domain_lock, flags1);
3268
Yu Zhao93a23a72009-05-18 13:51:37 +08003269 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf92009-04-04 01:45:37 +01003270 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003271 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003272 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003273
3274 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003275 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003276 */
3277 spin_lock_irqsave(&domain->iommu_lock, flags2);
3278 if (test_and_clear_bit(iommu->seq_id,
3279 &domain->iommu_bmp)) {
3280 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003281 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003282 }
3283 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3284
3285 free_devinfo_mem(info);
3286 spin_lock_irqsave(&device_domain_lock, flags1);
3287 }
3288 spin_unlock_irqrestore(&device_domain_lock, flags1);
3289}
3290
Weidong Han5e98c4b2008-12-08 23:03:27 +08003291/* domain id for virtual machine, it won't be set in context */
3292static unsigned long vm_domid;
3293
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003294static int vm_domain_min_agaw(struct dmar_domain *domain)
3295{
3296 int i;
3297 int min_agaw = domain->agaw;
3298
3299 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3300 for (; i < g_num_of_iommus; ) {
3301 if (min_agaw > g_iommus[i]->agaw)
3302 min_agaw = g_iommus[i]->agaw;
3303
3304 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3305 }
3306
3307 return min_agaw;
3308}
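
/*
 * Example: a VM domain spanning two IOMMUs, one with agaw 2 (48-bit,
 * 4-level tables) and one with agaw 1 (39-bit, 3-level), gets
 * min_agaw 1 -- every unit must be able to walk the domain's page
 * table, so the narrowest hardware wins.
 */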
3309
Weidong Han5e98c4b2008-12-08 23:03:27 +08003310static struct dmar_domain *iommu_alloc_vm_domain(void)
3311{
3312 struct dmar_domain *domain;
3313
3314 domain = alloc_domain_mem();
3315 if (!domain)
3316 return NULL;
3317
3318 domain->id = vm_domid++;
3319 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3320 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3321
3322 return domain;
3323}
3324
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003325static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003326{
3327 int adjust_width;
3328
3329 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3330 spin_lock_init(&domain->mapping_lock);
3331 spin_lock_init(&domain->iommu_lock);
3332
3333 domain_reserve_special_ranges(domain);
3334
3335 /* calculate AGAW */
3336 domain->gaw = guest_width;
3337 adjust_width = guestwidth_to_adjustwidth(guest_width);
3338 domain->agaw = width_to_agaw(adjust_width);
3339
3340 INIT_LIST_HEAD(&domain->devices);
3341
3342 domain->iommu_count = 0;
3343 domain->iommu_coherency = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003344 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003345
3346 /* always allocate the top pgd */
3347 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3348 if (!domain->pgd)
3349 return -ENOMEM;
3350 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3351 return 0;
3352}
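
/*
 * Width arithmetic, worked for the default guest_width of 48 (see
 * DEFAULT_DOMAIN_ADDRESS_WIDTH): (48 - 12) is an exact multiple of the
 * 9-bit level stride, so guestwidth_to_adjustwidth(48) == 48, and
 * width_to_agaw(48) == (48 - 30) / 9 == 2, i.e. a 4-level page table.
 */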
3353
3354static void iommu_free_vm_domain(struct dmar_domain *domain)
3355{
3356 unsigned long flags;
3357 struct dmar_drhd_unit *drhd;
3358 struct intel_iommu *iommu;
3359 unsigned long i;
3360 unsigned long ndomains;
3361
3362 for_each_drhd_unit(drhd) {
3363 if (drhd->ignored)
3364 continue;
3365 iommu = drhd->iommu;
3366
3367 ndomains = cap_ndoms(iommu->cap);
3368 i = find_first_bit(iommu->domain_ids, ndomains);
3369 for (; i < ndomains; ) {
3370 if (iommu->domains[i] == domain) {
3371 spin_lock_irqsave(&iommu->lock, flags);
3372 clear_bit(i, iommu->domain_ids);
3373 iommu->domains[i] = NULL;
3374 spin_unlock_irqrestore(&iommu->lock, flags);
3375 break;
3376 }
3377 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3378 }
3379 }
3380}
3381
3382static void vm_domain_exit(struct dmar_domain *domain)
3383{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003384	/* Domain 0 is reserved, so don't process it */
3385 if (!domain)
3386 return;
3387
3388 vm_domain_remove_all_dev_info(domain);
3389 /* destroy iovas */
3390 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003391
3392 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003393 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003394
3395 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003396 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003397
3398 iommu_free_vm_domain(domain);
3399 free_domain_mem(domain);
3400}
3401
Joerg Roedel5d450802008-12-03 14:52:32 +01003402static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003403{
Joerg Roedel5d450802008-12-03 14:52:32 +01003404 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003405
Joerg Roedel5d450802008-12-03 14:52:32 +01003406 dmar_domain = iommu_alloc_vm_domain();
3407 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003408 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003409 "intel_iommu_domain_init: dmar_domain == NULL\n");
3410 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003411 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003412 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003413 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003414 "intel_iommu_domain_init() failed\n");
3415 vm_domain_exit(dmar_domain);
3416 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003417 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003418 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003419
Joerg Roedel5d450802008-12-03 14:52:32 +01003420 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003421}
Kay, Allen M38717942008-09-09 18:37:29 +03003422
Joerg Roedel5d450802008-12-03 14:52:32 +01003423static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003424{
Joerg Roedel5d450802008-12-03 14:52:32 +01003425 struct dmar_domain *dmar_domain = domain->priv;
3426
3427 domain->priv = NULL;
3428 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003429}
Kay, Allen M38717942008-09-09 18:37:29 +03003430
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003431static int intel_iommu_attach_device(struct iommu_domain *domain,
3432 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003433{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003434 struct dmar_domain *dmar_domain = domain->priv;
3435 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003436 struct intel_iommu *iommu;
3437 int addr_width;
3438 u64 end;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003439 int ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003440
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003441 /* normally pdev is not mapped */
3442 if (unlikely(domain_context_mapped(pdev))) {
3443 struct dmar_domain *old_domain;
3444
3445 old_domain = find_domain(pdev);
3446 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003447 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3448 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3449 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003450 else
3451 domain_remove_dev_info(old_domain);
3452 }
3453 }
3454
David Woodhouse276dbf92009-04-04 01:45:37 +01003455 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3456 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003457 if (!iommu)
3458 return -ENODEV;
3459
3460 /* check if this iommu agaw is sufficient for max mapped address */
3461 addr_width = agaw_to_width(iommu->agaw);
3462 end = DOMAIN_MAX_ADDR(addr_width);
3463 end = end & VTD_PAGE_MASK;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003464 if (end < dmar_domain->max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003465 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3466 "sufficient for the mapped address (%llx)\n",
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003467 __func__, iommu->agaw, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003468 return -EFAULT;
3469 }
3470
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003471 ret = domain_add_dev_info(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003472 if (ret)
3473 return ret;
3474
Yu Zhao93a23a72009-05-18 13:51:37 +08003475 ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003476 return ret;
3477}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003478
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003479static void intel_iommu_detach_device(struct iommu_domain *domain,
3480 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003481{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003482 struct dmar_domain *dmar_domain = domain->priv;
3483 struct pci_dev *pdev = to_pci_dev(dev);
3484
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003485 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003486}
Kay, Allen M38717942008-09-09 18:37:29 +03003487
Joerg Roedeldde57a22008-12-03 15:04:09 +01003488static int intel_iommu_map_range(struct iommu_domain *domain,
3489 unsigned long iova, phys_addr_t hpa,
3490 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003491{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003492 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003493 u64 max_addr;
3494 int addr_width;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003495 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003496 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003497
Joerg Roedeldde57a22008-12-03 15:04:09 +01003498 if (iommu_prot & IOMMU_READ)
3499 prot |= DMA_PTE_READ;
3500 if (iommu_prot & IOMMU_WRITE)
3501 prot |= DMA_PTE_WRITE;
Sheng Yang9cf06692009-03-18 15:33:07 +08003502 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3503 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003504
David Woodhouse163cc522009-06-28 00:51:17 +01003505 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003506 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003507 int min_agaw;
3508 u64 end;
3509
3510 /* check if minimum agaw is sufficient for mapped address */
Joerg Roedeldde57a22008-12-03 15:04:09 +01003511 min_agaw = vm_domain_min_agaw(dmar_domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003512 addr_width = agaw_to_width(min_agaw);
3513 end = DOMAIN_MAX_ADDR(addr_width);
3514 end = end & VTD_PAGE_MASK;
3515 if (end < max_addr) {
3516 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3517 "sufficient for the mapped address (%llx)\n",
3518 __func__, min_agaw, max_addr);
3519 return -EFAULT;
3520 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01003521 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003522 }
David Woodhousead051222009-06-28 14:22:28 +01003523 /* Round up size to next multiple of PAGE_SIZE, if it and
3524 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01003525 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01003526 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3527 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003528 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003529}
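
/*
 * Usage sketch via the generic IOMMU API, as the KVM device-assignment
 * path uses it; nothing calls these ops directly.  example_assign() and
 * its addresses are hypothetical, and the call names assume the
 * iommu-api of this kernel (register_iommu/iommu_map_range):
 */
#if 0
static int example_assign(struct pci_dev *pdev)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc();	/* -> intel_iommu_domain_init() */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, &pdev->dev);
	if (ret)
		goto out_free;

	/* map 1MB at IOVA 0x100000 onto phys 0x80000000, read/write */
	ret = iommu_map_range(dom, 0x100000, 0x80000000, 0x100000,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(dom, &pdev->dev);
out_free:
	iommu_domain_free(dom);
	return ret;
}
#endif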
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003530
Joerg Roedeldde57a22008-12-03 15:04:09 +01003531static void intel_iommu_unmap_range(struct iommu_domain *domain,
3532 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003533{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003534 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003535
David Woodhouse163cc522009-06-28 00:51:17 +01003536 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3537 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003538
David Woodhouse163cc522009-06-28 00:51:17 +01003539 if (dmar_domain->max_addr == iova + size)
3540 dmar_domain->max_addr = iova;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003541}
Kay, Allen M38717942008-09-09 18:37:29 +03003542
Joerg Roedeld14d6572008-12-03 15:06:57 +01003543static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3544 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03003545{
Joerg Roedeld14d6572008-12-03 15:06:57 +01003546 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03003547 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003548 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003549
David Woodhouseb026fd22009-06-28 10:37:25 +01003550 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
Kay, Allen M38717942008-09-09 18:37:29 +03003551 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003552 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03003553
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003554 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03003555}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003556
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003557static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3558 unsigned long cap)
3559{
3560 struct dmar_domain *dmar_domain = domain->priv;
3561
3562 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3563 return dmar_domain->iommu_snooping;
3564
3565 return 0;
3566}
3567
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003568static struct iommu_ops intel_iommu_ops = {
3569 .domain_init = intel_iommu_domain_init,
3570 .domain_destroy = intel_iommu_domain_destroy,
3571 .attach_dev = intel_iommu_attach_device,
3572 .detach_dev = intel_iommu_detach_device,
3573 .map = intel_iommu_map_range,
3574 .unmap = intel_iommu_unmap_range,
3575 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003576 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003577};
David Woodhouse9af88142009-02-13 23:18:03 +00003578
3579static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3580{
3581 /*
3582 * Mobile 4 Series Chipset neglects to set RWBF capability,
3583 * but needs it:
3584 */
3585 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3586 rwbf_quirk = 1;
3587}
3588
3589DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);