/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

#define CONTEXT_TT_MULTI_LEVEL 0

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency; /* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

/* calculate agaw for each iommu.
 * "SAGAW" may differ across iommus; start from the default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;

}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}

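/*
 * Walk (and, if needed, build) the page table for a guest-physical address.
 * With the default 48-bit address width, agaw_to_level() gives a 4-level
 * table: bits 47:39, 38:30, 29:21 and 20:12 index levels 4 down to 1, each
 * level resolving LEVEL_STRIDE (9) bits, with the low 12 bits as the page
 * offset.  Missing intermediate tables are allocated on the way down.
 */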
static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need lock here, nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

778static int iommu_alloc_root_entry(struct intel_iommu *iommu)
779{
780 struct root_entry *root;
781 unsigned long flags;
782
783 root = (struct root_entry *)alloc_pgtable_page();
784 if (!root)
785 return -ENOMEM;
786
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700787 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700788
789 spin_lock_irqsave(&iommu->lock, flags);
790 iommu->root_entry = root;
791 spin_unlock_irqrestore(&iommu->lock, flags);
792
793 return 0;
794}
795
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700796static void iommu_set_root_entry(struct intel_iommu *iommu)
797{
798 void *addr;
799 u32 cmd, sts;
800 unsigned long flag;
801
802 addr = iommu->root_entry;
803
804 spin_lock_irqsave(&iommu->register_lock, flag);
805 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
806
807 cmd = iommu->gcmd | DMA_GCMD_SRTP;
808 writel(cmd, iommu->reg + DMAR_GCMD_REG);
809
810 /* Make sure hardware complete it */
811 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
812 readl, (sts & DMA_GSTS_RTPS), sts);
813
814 spin_unlock_irqrestore(&iommu->register_lock, flag);
815}
816
817static void iommu_flush_write_buffer(struct intel_iommu *iommu)
818{
819 u32 val;
820 unsigned long flag;
821
David Woodhouse9af88142009-02-13 23:18:03 +0000822 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700823 return;
824 val = iommu->gcmd | DMA_GCMD_WBF;
825
826 spin_lock_irqsave(&iommu->register_lock, flag);
827 writel(val, iommu->reg + DMAR_GCMD_REG);
828
829 /* Make sure hardware complete it */
830 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
831 readl, (!(val & DMA_GSTS_WBFS)), val);
832
833 spin_unlock_irqrestore(&iommu->register_lock, flag);
834}
835
/* return value determines if we need a write buffer flush */
static int __iommu_flush_context(struct intel_iommu *iommu,
	u16 did, u16 source_id, u8 function_mask, u64 type,
	int non_present_entry_flush)
{
	u64 val = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * is used to cache any non-present entries)
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* flushing a context entry also implicitly flushes the write buffer */
	return 0;
}

/* return value determines if we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int size_order, u64 type,
	int non_present_entry_flush)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * is used to cache any non-present entries)
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
	/* flushing an iotlb entry also implicitly flushes the write buffer */
	return 0;
}

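/*
 * Page-selective invalidation (PSI) of "pages" 4KiB pages starting at addr.
 * PSI takes a mask encoding the invalidation size as an order, so the page
 * count is rounded up to a power of two first (e.g. 3 pages -> mask 2, a
 * 4-page invalidation).  Falls back to a domain-selective flush when the
 * hardware lacks PSI support or the region is larger than it can express.
 */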
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int pages, int non_present_entry_flush)
{
	unsigned int mask;

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/* Fallback to domain selective flush if no PSI support */
	if (!cap_pgsel_inv(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH,
						non_present_entry_flush);

	/*
	 * PSI requires page size to be 2 ^ x, and the base address is naturally
	 * aligned to the size
	 */
	mask = ilog2(__roundup_pow_of_two(pages));
	/* Fallback to domain selective flush if size is too big */
	if (mask > cap_max_amask_val(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
			DMA_TLB_DSI_FLUSH, non_present_entry_flush);

	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
					DMA_TLB_PSI_FLUSH,
					non_present_entry_flush);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	iommu->gcmd |= DMA_GCMD_TE;
	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
{
	unsigned long num;
	unsigned long ndomains;
	struct dmar_domain *domain;
	unsigned long flags;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_domain_mem(domain);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return NULL;
	}

	set_bit(num, iommu->domain_ids);
	domain->id = num;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	domain->flags = 0;
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return domain;
}

static void iommu_free_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct intel_iommu *iommu;

	iommu = domain_get_iommu(domain);

	spin_lock_irqsave(&iommu->lock, flags);
	clear_bit(domain->id, iommu->domain_ids);
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}

}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

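/*
 * Round a guest address width up so that the bits above the 4KiB page
 * offset split evenly into 9-bit page-table levels, i.e. (agaw - 12) is a
 * multiple of LEVEL_STRIDE.  For example a 36-bit guest width becomes an
 * adjusted width of 39 (12 + 3 * 9); the result is capped at 64.
 */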
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

1292{
1293 u64 end;
1294
1295 /* Domain 0 is reserved, so dont process it */
1296 if (!domain)
1297 return;
1298
1299 domain_remove_dev_info(domain);
1300 /* destroy iovas */
1301 put_iova_domain(&domain->iovad);
1302 end = DOMAIN_MAX_ADDR(domain->gaw);
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001303 end = end & (~PAGE_MASK);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001304
1305 /* clear ptes */
1306 dma_pte_clear_range(domain, 0, end);
1307
1308 /* free page tables */
1309 dma_pte_free_pagetable(domain, 0, end);
1310
1311 iommu_free_domain(domain);
1312 free_domain_mem(domain);
1313}
1314
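/*
 * Install the context entry for (bus, devfn) on the iommu that covers the
 * device, pointing it at this domain's page tables, then invalidate the
 * context and IOTLB caches so the new mapping takes effect.
 */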
static int domain_context_mapping_one(struct dmar_domain *domain,
		u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	BUG_ON(!domain->pgd);

	iommu = device_to_iommu(bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * an iommu with a smaller agaw than the domain's.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);
	context_set_address_width(context, iommu->agaw);
	context_set_address_root(context, virt_to_phys(pgd));
	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/* it's a non-present to present mapping */
	if (iommu->flush.flush_context(iommu, domain->id,
		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
		DMA_CCMD_DEVICE_INVL, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pdev->bus->number,
		pdev->devfn);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain, parent->bus->number,
			parent->devfn);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->subordinate->number, 0);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
			tmp->bus->number, tmp->devfn);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu,
		pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
			parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu,
			tmp->subordinate->number, 0);
	else
		return device_context_mapped(iommu,
			tmp->bus->number, tmp->devfn);
}

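/*
 * Map the physical range [hpa, hpa + size) at IO virtual address iova,
 * one 4KiB VT-d page at a time, setting the read/write protection bits
 * (and, when requested, the snoop bit) in each last-level PTE.
 */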
static int
domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
			u64 hpa, size_t size, int prot)
{
	u64 start_pfn, end_pfn;
	struct dma_pte *pte;
	int index;
	int addr_width = agaw_to_width(domain->agaw);

	hpa &= (((u64)1) << addr_width) - 1;

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;
	iova &= PAGE_MASK;
	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
	index = 0;
	while (start_pfn < end_pfn) {
		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
		if (!pte)
			return -ENOMEM;
		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		start_pfn++;
		index++;
	}
	return 0;
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu = device_to_iommu(info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device's domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001571/* domain is initialized */
1572static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1573{
1574 struct dmar_domain *domain, *found = NULL;
1575 struct intel_iommu *iommu;
1576 struct dmar_drhd_unit *drhd;
1577 struct device_domain_info *info, *tmp;
1578 struct pci_dev *dev_tmp;
1579 unsigned long flags;
1580 int bus = 0, devfn = 0;
1581
1582 domain = find_domain(pdev);
1583 if (domain)
1584 return domain;
1585
1586 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1587 if (dev_tmp) {
1588 if (dev_tmp->is_pcie) {
1589 bus = dev_tmp->subordinate->number;
1590 devfn = 0;
1591 } else {
1592 bus = dev_tmp->bus->number;
1593 devfn = dev_tmp->devfn;
1594 }
1595 spin_lock_irqsave(&device_domain_lock, flags);
1596 list_for_each_entry(info, &device_domain_list, global) {
1597 if (info->bus == bus && info->devfn == devfn) {
1598 found = info->domain;
1599 break;
1600 }
1601 }
1602 spin_unlock_irqrestore(&device_domain_lock, flags);
1603		/* pcie-pci bridge already has a domain, use it */
1604 if (found) {
1605 domain = found;
1606 goto found_domain;
1607 }
1608 }
1609
1610 /* Allocate new domain for the device */
1611 drhd = dmar_find_matched_drhd_unit(pdev);
1612 if (!drhd) {
1613 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1614 pci_name(pdev));
1615 return NULL;
1616 }
1617 iommu = drhd->iommu;
1618
1619 domain = iommu_alloc_domain(iommu);
1620 if (!domain)
1621 goto error;
1622
1623 if (domain_init(domain, gaw)) {
1624 domain_exit(domain);
1625 goto error;
1626 }
1627
1628 /* register pcie-to-pci device */
1629 if (dev_tmp) {
1630 info = alloc_devinfo_mem();
1631 if (!info) {
1632 domain_exit(domain);
1633 goto error;
1634 }
1635 info->bus = bus;
1636 info->devfn = devfn;
1637 info->dev = NULL;
1638 info->domain = domain;
1639 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001640 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001641
1642		/* pcie-to-pci bridge already has a domain, use it */
1643 found = NULL;
1644 spin_lock_irqsave(&device_domain_lock, flags);
1645 list_for_each_entry(tmp, &device_domain_list, global) {
1646 if (tmp->bus == bus && tmp->devfn == devfn) {
1647 found = tmp->domain;
1648 break;
1649 }
1650 }
1651 if (found) {
1652 free_devinfo_mem(info);
1653 domain_exit(domain);
1654 domain = found;
1655 } else {
1656 list_add(&info->link, &domain->devices);
1657 list_add(&info->global, &device_domain_list);
1658 }
1659 spin_unlock_irqrestore(&device_domain_lock, flags);
1660 }
1661
1662found_domain:
1663 info = alloc_devinfo_mem();
1664 if (!info)
1665 goto error;
1666 info->bus = pdev->bus->number;
1667 info->devfn = pdev->devfn;
1668 info->dev = pdev;
1669 info->domain = domain;
1670 spin_lock_irqsave(&device_domain_lock, flags);
1671	/* somebody else might have beaten us to it */
1672 found = find_domain(pdev);
1673 if (found != NULL) {
1674 spin_unlock_irqrestore(&device_domain_lock, flags);
1675 if (found != domain) {
1676 domain_exit(domain);
1677 domain = found;
1678 }
1679 free_devinfo_mem(info);
1680 return domain;
1681 }
1682 list_add(&info->link, &domain->devices);
1683 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001684 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685 spin_unlock_irqrestore(&device_domain_lock, flags);
1686 return domain;
1687error:
1688	/* recheck it here; another caller may have set it up meanwhile */
1689 return find_domain(pdev);
1690}
1691
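/*
 * Set up a 1:1 (identity) mapping for one device over [start, end).
 * This is used for BIOS-reserved RMRR regions and for the legacy
 * workarounds below: the range is first reserved in the domain's iova
 * allocator so streaming DMA mappings can never be handed out from it,
 * any stale PTEs are cleared, the range is mapped read/write at the
 * same bus address as the physical address, and finally the device's
 * context entry is installed.
 *
 * Illustrative example (made-up addresses): an RMRR covering
 * 0x7f000000..0x7f0fffff is passed in as start=0x7f000000,
 * end=0x7f100000, giving base=0x7f000000 and size=0x100000 after page
 * alignment, i.e. 256 identity-mapped 4KiB pages.
 */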
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001692static int iommu_prepare_identity_map(struct pci_dev *pdev,
1693 unsigned long long start,
1694 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001695{
1696 struct dmar_domain *domain;
1697 unsigned long size;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001698 unsigned long long base;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699 int ret;
1700
1701 printk(KERN_INFO
1702 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1703 pci_name(pdev), start, end);
1704 /* page table init */
1705 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1706 if (!domain)
1707 return -ENOMEM;
1708
1709 /* The address might not be aligned */
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001710 base = start & PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711 size = end - base;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001712 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001713 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1714 IOVA_PFN(base + size) - 1)) {
1715 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1716 ret = -ENOMEM;
1717 goto error;
1718 }
1719
1720 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1721 size, base, pci_name(pdev));
1722 /*
1723	 * The RMRR range might overlap with a physical memory range,
1724	 * so clear any existing mapping first
1725 */
1726 dma_pte_clear_range(domain, base, base + size);
1727
1728 ret = domain_page_mapping(domain, base, base, size,
1729 DMA_PTE_READ|DMA_PTE_WRITE);
1730 if (ret)
1731 goto error;
1732
1733 /* context entry init */
1734 ret = domain_context_mapping(domain, pdev);
1735 if (!ret)
1736 return 0;
1737error:
1738 domain_exit(domain);
1739 return ret;
1740
1741}
1742
1743static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1744 struct pci_dev *pdev)
1745{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001746 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001747 return 0;
1748 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1749 rmrr->end_address + 1);
1750}
1751
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001752#ifdef CONFIG_DMAR_GFX_WA
Yinghai Lud52d53b2008-06-16 20:10:55 -07001753struct iommu_prepare_data {
1754 struct pci_dev *pdev;
1755 int ret;
1756};
1757
1758static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1759 unsigned long end_pfn, void *datax)
1760{
1761 struct iommu_prepare_data *data;
1762
1763 data = (struct iommu_prepare_data *)datax;
1764
1765 data->ret = iommu_prepare_identity_map(data->pdev,
1766 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1767 return data->ret;
1768
1769}
1770
1771static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1772{
1773 int nid;
1774 struct iommu_prepare_data data;
1775
1776 data.pdev = pdev;
1777 data.ret = 0;
1778
1779 for_each_online_node(nid) {
1780 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1781 if (data.ret)
1782 return data.ret;
1783 }
1784 return data.ret;
1785}
1786
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001787static void __init iommu_prepare_gfx_mapping(void)
1788{
1789 struct pci_dev *pdev = NULL;
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001790 int ret;
1791
1792 for_each_pci_dev(pdev) {
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001793 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001794 !IS_GFX_DEVICE(pdev))
1795 continue;
1796 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1797 pci_name(pdev));
Yinghai Lud52d53b2008-06-16 20:10:55 -07001798 ret = iommu_prepare_with_active_regions(pdev);
1799 if (ret)
1800 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001801 }
1802}
Mark McLoughlin2abd7e12008-11-20 15:49:50 +00001803#else /* !CONFIG_DMAR_GFX_WA */
1804static inline void iommu_prepare_gfx_mapping(void)
1805{
1806 return;
1807}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001808#endif
1809
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001810#ifdef CONFIG_DMAR_FLOPPY_WA
1811static inline void iommu_prepare_isa(void)
1812{
1813 struct pci_dev *pdev;
1814 int ret;
1815
1816 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1817 if (!pdev)
1818 return;
1819
1820 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1821 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1822
1823 if (ret)
Frank Seidel1c35b8e2009-02-06 10:23:36 +01001824		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001825 "floppy might not work\n");
1826
1827}
1828#else
1829static inline void iommu_prepare_isa(void)
1830{
1831 return;
1832}
1833#endif /* !CONFIG_DMAR_FLOPPY_WA */
1834
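/*
 * One-time DMA-remapping bring-up, called from intel_iommu_init().
 * Roughly, in order:
 *   1. count the DRHD units and allocate the global g_iommus[] and
 *      deferred_flush[] arrays;
 *   2. per IOMMU: initialize the domain-id bitmap and allocate the
 *      root-entry table;
 *   3. put each IOMMU back into a sane state (clear old faults,
 *      disable any queued invalidation left enabled before handover);
 *   4. pick the invalidation backend: queued invalidation if it can be
 *      enabled, register-based otherwise;
 *   5. optionally enable interrupt remapping;
 *   6. build the RMRR identity maps plus the graphics and ISA/floppy
 *      workaround mappings;
 *   7. per IOMMU: register the fault interrupt, program the root
 *      entry, perform global invalidations and enable translation.
 */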
Mark McLoughlin519a0542008-11-20 14:21:13 +00001835static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001836{
1837 struct dmar_drhd_unit *drhd;
1838 struct dmar_rmrr_unit *rmrr;
1839 struct pci_dev *pdev;
1840 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001841 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001842
1843 /*
1844 * for each drhd
1845 * allocate root
1846 * initialize and program root entry to not present
1847 * endfor
1848 */
1849 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08001850 g_num_of_iommus++;
1851 /*
1852		 * lock not needed as this is only incremented in the
1853		 * single-threaded kernel __init code path; all other
1854		 * accesses are read only
1855 */
1856 }
1857
Weidong Hand9630fe2008-12-08 11:06:32 +08001858 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
1859 GFP_KERNEL);
1860 if (!g_iommus) {
1861 printk(KERN_ERR "Allocating global iommu array failed\n");
1862 ret = -ENOMEM;
1863 goto error;
1864 }
1865
mark gross80b20dd2008-04-18 13:53:58 -07001866 deferred_flush = kzalloc(g_num_of_iommus *
1867 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1868 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08001869 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08001870 ret = -ENOMEM;
1871 goto error;
1872 }
1873
mark gross5e0d2a62008-03-04 15:22:08 -08001874 for_each_drhd_unit(drhd) {
1875 if (drhd->ignored)
1876 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001877
1878 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08001879 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001880
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001881 ret = iommu_init_domains(iommu);
1882 if (ret)
1883 goto error;
1884
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001885 /*
1886 * TBD:
1887 * we could share the same root & context tables
1888	 * among all IOMMUs. Need to split it later.
1889 */
1890 ret = iommu_alloc_root_entry(iommu);
1891 if (ret) {
1892 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1893 goto error;
1894 }
1895 }
1896
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001897 /*
1898	 * Start from a sane iommu hardware state.
1899 */
Youquan Songa77b67d2008-10-16 16:31:56 -07001900 for_each_drhd_unit(drhd) {
1901 if (drhd->ignored)
1902 continue;
1903
1904 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001905
1906 /*
1907 * If the queued invalidation is already initialized by us
1908 * (for example, while enabling interrupt-remapping) then
1909		 * things are already rolling from a sane state.
1910 */
1911 if (iommu->qi)
1912 continue;
1913
1914 /*
1915 * Clear any previous faults.
1916 */
1917 dmar_fault(-1, iommu);
1918 /*
1919 * Disable queued invalidation if supported and already enabled
1920 * before OS handover.
1921 */
1922 dmar_disable_qi(iommu);
1923 }
1924
1925 for_each_drhd_unit(drhd) {
1926 if (drhd->ignored)
1927 continue;
1928
1929 iommu = drhd->iommu;
1930
Youquan Songa77b67d2008-10-16 16:31:56 -07001931 if (dmar_enable_qi(iommu)) {
1932 /*
1933 * Queued Invalidate not enabled, use Register Based
1934 * Invalidate
1935 */
1936 iommu->flush.flush_context = __iommu_flush_context;
1937 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
1938 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09001939 "invalidation\n",
1940 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07001941 } else {
1942 iommu->flush.flush_context = qi_flush_context;
1943 iommu->flush.flush_iotlb = qi_flush_iotlb;
1944 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09001945 "invalidation\n",
1946 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07001947 }
1948 }
1949
Han, Weidongd0b03bd2009-04-03 17:15:50 +08001950#ifdef CONFIG_INTR_REMAP
1951 if (!intr_remapping_enabled) {
1952 ret = enable_intr_remapping(0);
1953 if (ret)
1954 printk(KERN_ERR
1955 "IOMMU: enable interrupt remapping failed\n");
1956 }
1957#endif
1958
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001959 /*
1960 * For each rmrr
1961 * for each dev attached to rmrr
1962 * do
1963 * locate drhd for dev, alloc domain for dev
1964 * allocate free domain
1965 * allocate page table entries for rmrr
1966 * if context not allocated for bus
1967 * allocate and init context
1968 * set present in root table for this bus
1969 * init context with domain, translation etc
1970 * endfor
1971 * endfor
1972 */
1973 for_each_rmrr_units(rmrr) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001974 for (i = 0; i < rmrr->devices_cnt; i++) {
1975 pdev = rmrr->devices[i];
1976			/* some BIOSes list nonexistent devices in the DMAR table */
1977 if (!pdev)
1978 continue;
1979 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
1980 if (ret)
1981 printk(KERN_ERR
1982 "IOMMU: mapping reserved region failed\n");
1983 }
1984 }
1985
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001986 iommu_prepare_gfx_mapping();
1987
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001988 iommu_prepare_isa();
1989
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001990 /*
1991 * for each drhd
1992 * enable fault log
1993 * global invalidate context cache
1994 * global invalidate iotlb
1995 * enable translation
1996 */
1997 for_each_drhd_unit(drhd) {
1998 if (drhd->ignored)
1999 continue;
2000 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002001
2002 iommu_flush_write_buffer(iommu);
2003
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002004 ret = dmar_set_interrupt(iommu);
2005 if (ret)
2006 goto error;
2007
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002008 iommu_set_root_entry(iommu);
2009
Youquan Songa77b67d2008-10-16 16:31:56 -07002010 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
2011 0);
2012 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
2013 0);
mark grossf8bab732008-02-08 04:18:38 -08002014 iommu_disable_protect_mem_regions(iommu);
2015
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002016 ret = iommu_enable_translation(iommu);
2017 if (ret)
2018 goto error;
2019 }
2020
2021 return 0;
2022error:
2023 for_each_drhd_unit(drhd) {
2024 if (drhd->ignored)
2025 continue;
2026 iommu = drhd->iommu;
2027 free_iommu(iommu);
2028 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002029 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002030 return ret;
2031}
2032
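/*
 * Round a (possibly unaligned) buffer up to the number of bytes of
 * page-granular mapping it needs.  For example, with 4KiB pages,
 * aligned_size(0x12345678, 0x100) = PAGE_ALIGN(0x678 + 0x100) = 0x1000:
 * a 256-byte buffer that sits entirely within one page still needs one
 * full page of IOVA space and one PTE.
 */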
2033static inline u64 aligned_size(u64 host_addr, size_t size)
2034{
2035 u64 addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002036 addr = (host_addr & (~PAGE_MASK)) + size;
2037 return PAGE_ALIGN(addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002038}
2039
2040struct iova *
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002041iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002042{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002043 struct iova *piova;
2044
2045 /* Make sure it's in range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002046 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002047 if (!size || (IOVA_START_ADDR + size > end))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002048 return NULL;
2049
2050 piova = alloc_iova(&domain->iovad,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002051 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002052 return piova;
2053}
2054
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002055static struct iova *
2056__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002057 size_t size, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002058{
2059 struct pci_dev *pdev = to_pci_dev(dev);
2060 struct iova *iova = NULL;
2061
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002062 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
2063 iova = iommu_alloc_iova(domain, size, dma_mask);
2064 else {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002065 /*
2066 * First try to allocate an io virtual address in
2067 * DMA_32BIT_MASK and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002068		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002069 */
2070 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
2071 if (!iova)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002072 iova = iommu_alloc_iova(domain, size, dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002073 }
2074
2075 if (!iova) {
2076		printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
2077 return NULL;
2078 }
2079
2080 return iova;
2081}
2082
2083static struct dmar_domain *
2084get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002085{
2086 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002087 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002088
2089 domain = get_domain_for_dev(pdev,
2090 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2091 if (!domain) {
2092 printk(KERN_ERR
2093			"Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002094 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002095 }
2096
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002097 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002098 if (unlikely(!domain_context_mapped(pdev))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002099 ret = domain_context_mapping(domain, pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002100 if (ret) {
2101 printk(KERN_ERR
2102				"Domain context map for %s failed\n",
2103 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002104 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002105 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002106 }
2107
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002108 return domain;
2109}
2110
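/*
 * Core of the streaming DMA map path.  Devices marked with
 * DUMMY_DEVICE_DOMAIN_INFO bypass translation and simply get the
 * physical address back.  For everything else: get (or lazily create)
 * the device's domain, allocate an IOVA region large enough for the
 * page-aligned buffer (__intel_alloc_iova tries below 4GB first for
 * devices with a wider dma_mask, unless forcedac is set), install PTEs
 * whose permissions follow the DMA direction, flush the IOTLB for the
 * new range, and return the IOVA plus the buffer's offset within its
 * first page.
 *
 * Illustrative example (made-up numbers): paddr=0x12345678, size=0x100
 * needs one page; if the allocator hands back the IOVA page
 * 0xffffe000, the caller gets 0xffffe678 as the dma_addr_t.
 */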
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002111static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2112 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002113{
2114 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002115 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002116 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002117 struct iova *iova;
2118 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002119 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002120 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002121
2122 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002123 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002124 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002125
2126 domain = get_valid_domain_for_dev(pdev);
2127 if (!domain)
2128 return 0;
2129
Weidong Han8c11e792008-12-08 15:29:22 +08002130 iommu = domain_get_iommu(domain);
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002131 size = aligned_size((u64)paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002132
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002133 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002134 if (!iova)
2135 goto error;
2136
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002137 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002138
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002139 /*
2140 * Check if DMAR supports zero-length reads on write only
2141	 * mappings.
2142 */
2143 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002144 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002145 prot |= DMA_PTE_READ;
2146 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2147 prot |= DMA_PTE_WRITE;
2148 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002149	 * paddr to (paddr + size) might span a partial page, so map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002150	 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002151	 * might get two guest addresses mapping to the same host paddr, but
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002152	 * this is not a big problem
2153 */
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002154 ret = domain_page_mapping(domain, start_paddr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002155 ((u64)paddr) & PAGE_MASK, size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002156 if (ret)
2157 goto error;
2158
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002159 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002160 ret = iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002161 start_paddr, size >> VTD_PAGE_SHIFT, 1);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002162 if (ret)
Weidong Han8c11e792008-12-08 15:29:22 +08002163 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002164
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002165 return start_paddr + ((u64)paddr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002166
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002167error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002168 if (iova)
2169 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002170 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002171 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002172 return 0;
2173}
2174
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002175static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2176 unsigned long offset, size_t size,
2177 enum dma_data_direction dir,
2178 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002179{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002180 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2181 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002182}
2183
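/*
 * Deferred-unmap machinery for the non-strict (default) mode.  Instead
 * of paying for an IOTLB flush on every unmap, intel_unmap_page()
 * queues the freed IOVA in the per-IOMMU deferred_flush[] table via
 * add_unmap().  flush_unmaps() then issues a single global IOTLB flush
 * per IOMMU and releases all queued IOVAs; it runs either from a 10ms
 * timer or immediately once HIGH_WATER_MARK entries have accumulated.
 * The trade-off is a short window in which a device could still use a
 * translation for memory the CPU has already reclaimed.
 */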
mark gross5e0d2a62008-03-04 15:22:08 -08002184static void flush_unmaps(void)
2185{
mark gross80b20dd2008-04-18 13:53:58 -07002186 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002187
mark gross5e0d2a62008-03-04 15:22:08 -08002188 timer_on = 0;
2189
2190 /* just flush them all */
2191 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002192 struct intel_iommu *iommu = g_iommus[i];
2193 if (!iommu)
2194 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002195
Weidong Hana2bb8452008-12-08 11:24:12 +08002196 if (deferred_flush[i].next) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002197 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2198 DMA_TLB_GLOBAL_FLUSH, 0);
mark gross80b20dd2008-04-18 13:53:58 -07002199 for (j = 0; j < deferred_flush[i].next; j++) {
2200 __free_iova(&deferred_flush[i].domain[j]->iovad,
2201 deferred_flush[i].iova[j]);
2202 }
2203 deferred_flush[i].next = 0;
2204 }
mark gross5e0d2a62008-03-04 15:22:08 -08002205 }
2206
mark gross5e0d2a62008-03-04 15:22:08 -08002207 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002208}
2209
2210static void flush_unmaps_timeout(unsigned long data)
2211{
mark gross80b20dd2008-04-18 13:53:58 -07002212 unsigned long flags;
2213
2214 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002215 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002216 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002217}
2218
2219static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2220{
2221 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002222 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002223 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002224
2225 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002226 if (list_size == HIGH_WATER_MARK)
2227 flush_unmaps();
2228
Weidong Han8c11e792008-12-08 15:29:22 +08002229 iommu = domain_get_iommu(dom);
2230 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002231
mark gross80b20dd2008-04-18 13:53:58 -07002232 next = deferred_flush[iommu_id].next;
2233 deferred_flush[iommu_id].domain[next] = dom;
2234 deferred_flush[iommu_id].iova[next] = iova;
2235 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002236
2237 if (!timer_on) {
2238 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2239 timer_on = 1;
2240 }
2241 list_size++;
2242 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2243}
2244
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002245static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2246 size_t size, enum dma_data_direction dir,
2247 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002248{
2249 struct pci_dev *pdev = to_pci_dev(dev);
2250 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002251 unsigned long start_addr;
2252 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002253 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002254
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002255 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002256 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002257 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002258 BUG_ON(!domain);
2259
Weidong Han8c11e792008-12-08 15:29:22 +08002260 iommu = domain_get_iommu(domain);
2261
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002262 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2263 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002264 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002265
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002266 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002267 size = aligned_size((u64)dev_addr, size);
2268
David Woodhouse4cf2e752009-02-11 17:23:43 +00002269 pr_debug("Device %s unmapping: %zx@%llx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002270 pci_name(pdev), size, (unsigned long long)start_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002271
2272 /* clear the whole page */
2273 dma_pte_clear_range(domain, start_addr, start_addr + size);
2274 /* free page tables */
2275 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
mark gross5e0d2a62008-03-04 15:22:08 -08002276 if (intel_iommu_strict) {
Weidong Han8c11e792008-12-08 15:29:22 +08002277 if (iommu_flush_iotlb_psi(iommu,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002278 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002279 iommu_flush_write_buffer(iommu);
mark gross5e0d2a62008-03-04 15:22:08 -08002280 /* free iova */
2281 __free_iova(&domain->iovad, iova);
2282 } else {
2283 add_unmap(domain, iova);
2284 /*
2285		 * queue up the release of the unmap to save roughly 1/6th of the
2286		 * cpu time otherwise used up by the iotlb flush operation...
2287 */
mark gross5e0d2a62008-03-04 15:22:08 -08002288 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002289}
2290
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002291static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2292 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002293{
2294 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2295}
2296
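/*
 * Coherent ("consistent") DMA allocation.  The pages come from the
 * normal allocator -- GFP_DMA/GFP_DMA32 are masked off because the
 * IOMMU can present any physical page to the device at a suitable bus
 * address -- and are then mapped bidirectionally against the device's
 * coherent_dma_mask.  Drivers normally reach this through
 * dma_alloc_coherent()/dma_free_coherent() once intel_dma_ops has been
 * installed.
 */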
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002297static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2298 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002299{
2300 void *vaddr;
2301 int order;
2302
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002303 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002304 order = get_order(size);
2305 flags &= ~(GFP_DMA | GFP_DMA32);
2306
2307 vaddr = (void *)__get_free_pages(flags, order);
2308 if (!vaddr)
2309 return NULL;
2310 memset(vaddr, 0, size);
2311
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002312 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2313 DMA_BIDIRECTIONAL,
2314 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002315 if (*dma_handle)
2316 return vaddr;
2317 free_pages((unsigned long)vaddr, order);
2318 return NULL;
2319}
2320
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002321static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2322 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002323{
2324 int order;
2325
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002326 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002327 order = get_order(size);
2328
2329 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2330 free_pages((unsigned long)vaddr, order);
2331}
2332
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002333static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2334 int nelems, enum dma_data_direction dir,
2335 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002336{
2337 int i;
2338 struct pci_dev *pdev = to_pci_dev(hwdev);
2339 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002340 unsigned long start_addr;
2341 struct iova *iova;
2342 size_t size = 0;
David Woodhouse4cf2e752009-02-11 17:23:43 +00002343 phys_addr_t addr;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002344 struct scatterlist *sg;
Weidong Han8c11e792008-12-08 15:29:22 +08002345 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002346
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002347 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002348 return;
2349
2350 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002351 BUG_ON(!domain);
2352
2353 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002354
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002355 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002356 if (!iova)
2357 return;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002358 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002359 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002360 size += aligned_size((u64)addr, sg->length);
2361 }
2362
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002363 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002364
2365 /* clear the whole page */
2366 dma_pte_clear_range(domain, start_addr, start_addr + size);
2367 /* free page tables */
2368 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2369
Weidong Han8c11e792008-12-08 15:29:22 +08002370 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002371 size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002372 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002373
2374 /* free iova */
2375 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002376}
2377
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002378static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002379 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002380{
2381 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002382 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002383
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002384 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002385 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002386 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002387 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002388 }
2389 return nelems;
2390}
2391
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002392static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2393 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002394{
David Woodhouse4cf2e752009-02-11 17:23:43 +00002395 phys_addr_t addr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002396 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002397 struct pci_dev *pdev = to_pci_dev(hwdev);
2398 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002399 size_t size = 0;
2400 int prot = 0;
2401 size_t offset = 0;
2402 struct iova *iova = NULL;
2403 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002404 struct scatterlist *sg;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002405 unsigned long start_addr;
Weidong Han8c11e792008-12-08 15:29:22 +08002406 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002407
2408 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002409 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002410 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002411
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002412 domain = get_valid_domain_for_dev(pdev);
2413 if (!domain)
2414 return 0;
2415
Weidong Han8c11e792008-12-08 15:29:22 +08002416 iommu = domain_get_iommu(domain);
2417
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002418 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002419 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002420 size += aligned_size((u64)addr, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002421 }
2422
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002423 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002424 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002425 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002426 return 0;
2427 }
2428
2429 /*
2430 * Check if DMAR supports zero-length reads on write only
2431	 * mappings.
2432 */
2433 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002434 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002435 prot |= DMA_PTE_READ;
2436 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2437 prot |= DMA_PTE_WRITE;
2438
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002439 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002440 offset = 0;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002441 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002442 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002443 size = aligned_size((u64)addr, sg->length);
2444 ret = domain_page_mapping(domain, start_addr + offset,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002445 ((u64)addr) & PAGE_MASK,
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002446 size, prot);
2447 if (ret) {
2448 /* clear the page */
2449 dma_pte_clear_range(domain, start_addr,
2450 start_addr + offset);
2451 /* free page tables */
2452 dma_pte_free_pagetable(domain, start_addr,
2453 start_addr + offset);
2454 /* free iova */
2455 __free_iova(&domain->iovad, iova);
2456 return 0;
2457 }
2458 sg->dma_address = start_addr + offset +
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002459 ((u64)addr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002460 sg->dma_length = sg->length;
2461 offset += size;
2462 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002463
2464 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002465 if (iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002466 start_addr, offset >> VTD_PAGE_SHIFT, 1))
Weidong Han8c11e792008-12-08 15:29:22 +08002467 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002468 return nelems;
2469}
2470
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002471static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2472{
2473 return !dma_addr;
2474}
2475
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002476struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002477 .alloc_coherent = intel_alloc_coherent,
2478 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002479 .map_sg = intel_map_sg,
2480 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002481 .map_page = intel_map_page,
2482 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002483 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002484};
2485
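/*
 * Illustrative (hypothetical) driver-side view: once dma_ops points at
 * intel_dma_ops, an ordinary streaming mapping such as
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * ends up in intel_map_page()/intel_unmap_page() above, so the device
 * only ever sees IOVAs allocated from its domain, never raw physical
 * addresses.
 */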
2486static inline int iommu_domain_cache_init(void)
2487{
2488 int ret = 0;
2489
2490 iommu_domain_cache = kmem_cache_create("iommu_domain",
2491 sizeof(struct dmar_domain),
2492 0,
2493 SLAB_HWCACHE_ALIGN,
2494
2495 NULL);
2496 if (!iommu_domain_cache) {
2497 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2498 ret = -ENOMEM;
2499 }
2500
2501 return ret;
2502}
2503
2504static inline int iommu_devinfo_cache_init(void)
2505{
2506 int ret = 0;
2507
2508 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2509 sizeof(struct device_domain_info),
2510 0,
2511 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002512 NULL);
2513 if (!iommu_devinfo_cache) {
2514 printk(KERN_ERR "Couldn't create devinfo cache\n");
2515 ret = -ENOMEM;
2516 }
2517
2518 return ret;
2519}
2520
2521static inline int iommu_iova_cache_init(void)
2522{
2523 int ret = 0;
2524
2525 iommu_iova_cache = kmem_cache_create("iommu_iova",
2526 sizeof(struct iova),
2527 0,
2528 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002529 NULL);
2530 if (!iommu_iova_cache) {
2531 printk(KERN_ERR "Couldn't create iova cache\n");
2532 ret = -ENOMEM;
2533 }
2534
2535 return ret;
2536}
2537
2538static int __init iommu_init_mempool(void)
2539{
2540 int ret;
2541 ret = iommu_iova_cache_init();
2542 if (ret)
2543 return ret;
2544
2545 ret = iommu_domain_cache_init();
2546 if (ret)
2547 goto domain_error;
2548
2549 ret = iommu_devinfo_cache_init();
2550 if (!ret)
2551 return ret;
2552
2553 kmem_cache_destroy(iommu_domain_cache);
2554domain_error:
2555 kmem_cache_destroy(iommu_iova_cache);
2556
2557 return -ENOMEM;
2558}
2559
2560static void __init iommu_exit_mempool(void)
2561{
2562 kmem_cache_destroy(iommu_devinfo_cache);
2563 kmem_cache_destroy(iommu_domain_cache);
2564 kmem_cache_destroy(iommu_iova_cache);
2565
2566}
2567
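/*
 * Decide which DRHD units can be ignored entirely: units that have no
 * PCI devices attached, and -- when dmar_map_gfx is clear -- units that
 * only cover graphics devices.  Devices under an ignored unit get
 * DUMMY_DEVICE_DOMAIN_INFO in dev.archdata.iommu, which makes every
 * DMA-API entry point above treat them as pass-through (no
 * translation).
 */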
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002568static void __init init_no_remapping_devices(void)
2569{
2570 struct dmar_drhd_unit *drhd;
2571
2572 for_each_drhd_unit(drhd) {
2573 if (!drhd->include_all) {
2574 int i;
2575 for (i = 0; i < drhd->devices_cnt; i++)
2576 if (drhd->devices[i] != NULL)
2577 break;
2578 /* ignore DMAR unit if no pci devices exist */
2579 if (i == drhd->devices_cnt)
2580 drhd->ignored = 1;
2581 }
2582 }
2583
2584 if (dmar_map_gfx)
2585 return;
2586
2587 for_each_drhd_unit(drhd) {
2588 int i;
2589 if (drhd->ignored || drhd->include_all)
2590 continue;
2591
2592 for (i = 0; i < drhd->devices_cnt; i++)
2593 if (drhd->devices[i] &&
2594 !IS_GFX_DEVICE(drhd->devices[i]))
2595 break;
2596
2597 if (i < drhd->devices_cnt)
2598 continue;
2599
2600 /* bypass IOMMU if it is just for gfx devices */
2601 drhd->ignored = 1;
2602 for (i = 0; i < drhd->devices_cnt; i++) {
2603 if (!drhd->devices[i])
2604 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002605 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002606 }
2607 }
2608}
2609
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002610#ifdef CONFIG_SUSPEND
2611static int init_iommu_hw(void)
2612{
2613 struct dmar_drhd_unit *drhd;
2614 struct intel_iommu *iommu = NULL;
2615
2616 for_each_active_iommu(iommu, drhd)
2617 if (iommu->qi)
2618 dmar_reenable_qi(iommu);
2619
2620 for_each_active_iommu(iommu, drhd) {
2621 iommu_flush_write_buffer(iommu);
2622
2623 iommu_set_root_entry(iommu);
2624
2625 iommu->flush.flush_context(iommu, 0, 0, 0,
2626 DMA_CCMD_GLOBAL_INVL, 0);
2627 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2628 DMA_TLB_GLOBAL_FLUSH, 0);
2629 iommu_disable_protect_mem_regions(iommu);
2630 iommu_enable_translation(iommu);
2631 }
2632
2633 return 0;
2634}
2635
2636static void iommu_flush_all(void)
2637{
2638 struct dmar_drhd_unit *drhd;
2639 struct intel_iommu *iommu;
2640
2641 for_each_active_iommu(iommu, drhd) {
2642 iommu->flush.flush_context(iommu, 0, 0, 0,
2643 DMA_CCMD_GLOBAL_INVL, 0);
2644 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2645 DMA_TLB_GLOBAL_FLUSH, 0);
2646 }
2647}
2648
2649static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2650{
2651 struct dmar_drhd_unit *drhd;
2652 struct intel_iommu *iommu = NULL;
2653 unsigned long flag;
2654
2655 for_each_active_iommu(iommu, drhd) {
2656 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2657 GFP_ATOMIC);
2658 if (!iommu->iommu_state)
2659 goto nomem;
2660 }
2661
2662 iommu_flush_all();
2663
2664 for_each_active_iommu(iommu, drhd) {
2665 iommu_disable_translation(iommu);
2666
2667 spin_lock_irqsave(&iommu->register_lock, flag);
2668
2669 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2670 readl(iommu->reg + DMAR_FECTL_REG);
2671 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2672 readl(iommu->reg + DMAR_FEDATA_REG);
2673 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2674 readl(iommu->reg + DMAR_FEADDR_REG);
2675 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2676 readl(iommu->reg + DMAR_FEUADDR_REG);
2677
2678 spin_unlock_irqrestore(&iommu->register_lock, flag);
2679 }
2680 return 0;
2681
2682nomem:
2683 for_each_active_iommu(iommu, drhd)
2684 kfree(iommu->iommu_state);
2685
2686 return -ENOMEM;
2687}
2688
2689static int iommu_resume(struct sys_device *dev)
2690{
2691 struct dmar_drhd_unit *drhd;
2692 struct intel_iommu *iommu = NULL;
2693 unsigned long flag;
2694
2695 if (init_iommu_hw()) {
2696		WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
2697 return -EIO;
2698 }
2699
2700 for_each_active_iommu(iommu, drhd) {
2701
2702 spin_lock_irqsave(&iommu->register_lock, flag);
2703
2704 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2705 iommu->reg + DMAR_FECTL_REG);
2706 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2707 iommu->reg + DMAR_FEDATA_REG);
2708 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2709 iommu->reg + DMAR_FEADDR_REG);
2710 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2711 iommu->reg + DMAR_FEUADDR_REG);
2712
2713 spin_unlock_irqrestore(&iommu->register_lock, flag);
2714 }
2715
2716 for_each_active_iommu(iommu, drhd)
2717 kfree(iommu->iommu_state);
2718
2719 return 0;
2720}
2721
2722static struct sysdev_class iommu_sysclass = {
2723 .name = "iommu",
2724 .resume = iommu_resume,
2725 .suspend = iommu_suspend,
2726};
2727
2728static struct sys_device device_iommu = {
2729 .cls = &iommu_sysclass,
2730};
2731
2732static int __init init_iommu_sysfs(void)
2733{
2734 int error;
2735
2736 error = sysdev_class_register(&iommu_sysclass);
2737 if (error)
2738 return error;
2739
2740 error = sysdev_register(&device_iommu);
2741 if (error)
2742 sysdev_class_unregister(&iommu_sysclass);
2743
2744 return error;
2745}
2746
2747#else
2748static int __init init_iommu_sysfs(void)
2749{
2750 return 0;
2751}
2752#endif /* CONFIG_SUSPEND */
2753
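/*
 * Main entry point for DMA-remapping setup, invoked from the arch
 * PCI/DMA init code.  It parses the DMAR table and device scopes,
 * bails out (returning -ENODEV) if DMA remapping is disabled or
 * swiotlb is already in use, sets up the kmem caches and reserved IOVA
 * ranges, runs init_dmars() to program and enable every IOMMU, and
 * finally installs intel_dma_ops as the system's dma_ops and registers
 * the generic iommu_ops used by the IOMMU API (e.g. KVM device
 * assignment).
 */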
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002754int __init intel_iommu_init(void)
2755{
2756 int ret = 0;
2757
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002758 if (dmar_table_init())
2759 return -ENODEV;
2760
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002761 if (dmar_dev_scope_init())
2762 return -ENODEV;
2763
Suresh Siddha2ae21012008-07-10 11:16:43 -07002764 /*
2765 * Check the need for DMA-remapping initialization now.
2766 * Above initialization will also be used by Interrupt-remapping.
2767 */
2768 if (no_iommu || swiotlb || dmar_disabled)
2769 return -ENODEV;
2770
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002771 iommu_init_mempool();
2772 dmar_init_reserved_ranges();
2773
2774 init_no_remapping_devices();
2775
2776 ret = init_dmars();
2777 if (ret) {
2778 printk(KERN_ERR "IOMMU: dmar init failed\n");
2779 put_iova_domain(&reserved_iova_list);
2780 iommu_exit_mempool();
2781 return ret;
2782 }
2783 printk(KERN_INFO
2784 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2785
mark gross5e0d2a62008-03-04 15:22:08 -08002786 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002787 force_iommu = 1;
2788 dma_ops = &intel_dma_ops;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002789 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01002790
2791 register_iommu(&intel_iommu_ops);
2792
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002793 return 0;
2794}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002795
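/*
 * The vm_domain_* helpers below back domains created through the
 * generic IOMMU API (DOMAIN_FLAG_VIRTUAL_MACHINE), typically for KVM
 * device assignment.  Unlike the DMA-API domains above, a VM domain
 * gets its id from the software counter vm_domid rather than a
 * per-IOMMU id, and it may span devices behind several IOMMUs, so the
 * domain's iommu_bmp, iommu_count and derived capabilities are
 * recomputed as devices are attached and detached.
 */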
Weidong Hanc7151a82008-12-08 22:51:37 +08002796static int vm_domain_add_dev_info(struct dmar_domain *domain,
2797 struct pci_dev *pdev)
2798{
2799 struct device_domain_info *info;
2800 unsigned long flags;
2801
2802 info = alloc_devinfo_mem();
2803 if (!info)
2804 return -ENOMEM;
2805
2806 info->bus = pdev->bus->number;
2807 info->devfn = pdev->devfn;
2808 info->dev = pdev;
2809 info->domain = domain;
2810
2811 spin_lock_irqsave(&device_domain_lock, flags);
2812 list_add(&info->link, &domain->devices);
2813 list_add(&info->global, &device_domain_list);
2814 pdev->dev.archdata.iommu = info;
2815 spin_unlock_irqrestore(&device_domain_lock, flags);
2816
2817 return 0;
2818}
2819
Han, Weidong3199aa62009-02-26 17:31:12 +08002820static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2821 struct pci_dev *pdev)
2822{
2823 struct pci_dev *tmp, *parent;
2824
2825 if (!iommu || !pdev)
2826 return;
2827
2828 /* dependent device detach */
2829 tmp = pci_find_upstream_pcie_bridge(pdev);
2830 /* Secondary interface's bus number and devfn 0 */
2831 if (tmp) {
2832 parent = pdev->bus->self;
2833 while (parent != tmp) {
2834 iommu_detach_dev(iommu, parent->bus->number,
2835 parent->devfn);
2836 parent = parent->bus->self;
2837 }
2838 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
2839 iommu_detach_dev(iommu,
2840 tmp->subordinate->number, 0);
2841 else /* this is a legacy PCI bridge */
2842 iommu_detach_dev(iommu,
2843 tmp->bus->number, tmp->devfn);
2844 }
2845}
2846
Weidong Hanc7151a82008-12-08 22:51:37 +08002847static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2848 struct pci_dev *pdev)
2849{
2850 struct device_domain_info *info;
2851 struct intel_iommu *iommu;
2852 unsigned long flags;
2853 int found = 0;
2854 struct list_head *entry, *tmp;
2855
2856 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
2857 if (!iommu)
2858 return;
2859
2860 spin_lock_irqsave(&device_domain_lock, flags);
2861 list_for_each_safe(entry, tmp, &domain->devices) {
2862 info = list_entry(entry, struct device_domain_info, link);
2863 if (info->bus == pdev->bus->number &&
2864 info->devfn == pdev->devfn) {
2865 list_del(&info->link);
2866 list_del(&info->global);
2867 if (info->dev)
2868 info->dev->dev.archdata.iommu = NULL;
2869 spin_unlock_irqrestore(&device_domain_lock, flags);
2870
2871 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08002872 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08002873 free_devinfo_mem(info);
2874
2875 spin_lock_irqsave(&device_domain_lock, flags);
2876
2877 if (found)
2878 break;
2879 else
2880 continue;
2881 }
2882
2883		/* if there are no other devices under the same iommu
2884		 * owned by this domain, clear this iommu in iommu_bmp,
2885		 * and update the iommu count and coherency
2886 */
2887 if (device_to_iommu(info->bus, info->devfn) == iommu)
2888 found = 1;
2889 }
2890
2891 if (found == 0) {
2892 unsigned long tmp_flags;
2893 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
2894 clear_bit(iommu->seq_id, &domain->iommu_bmp);
2895 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08002896 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08002897 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
2898 }
2899
2900 spin_unlock_irqrestore(&device_domain_lock, flags);
2901}
2902
2903static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2904{
2905 struct device_domain_info *info;
2906 struct intel_iommu *iommu;
2907 unsigned long flags1, flags2;
2908
2909 spin_lock_irqsave(&device_domain_lock, flags1);
2910 while (!list_empty(&domain->devices)) {
2911 info = list_entry(domain->devices.next,
2912 struct device_domain_info, link);
2913 list_del(&info->link);
2914 list_del(&info->global);
2915 if (info->dev)
2916 info->dev->dev.archdata.iommu = NULL;
2917
2918 spin_unlock_irqrestore(&device_domain_lock, flags1);
2919
2920 iommu = device_to_iommu(info->bus, info->devfn);
2921 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08002922 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08002923
2924 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08002925 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08002926 */
2927 spin_lock_irqsave(&domain->iommu_lock, flags2);
2928 if (test_and_clear_bit(iommu->seq_id,
2929 &domain->iommu_bmp)) {
2930 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08002931 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08002932 }
2933 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2934
2935 free_devinfo_mem(info);
2936 spin_lock_irqsave(&device_domain_lock, flags1);
2937 }
2938 spin_unlock_irqrestore(&device_domain_lock, flags1);
2939}
2940
Weidong Han5e98c4b2008-12-08 23:03:27 +08002941/* domain id for virtual machine domains; it won't be set in the context entry */
2942static unsigned long vm_domid;
2943
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002944static int vm_domain_min_agaw(struct dmar_domain *domain)
2945{
2946 int i;
2947 int min_agaw = domain->agaw;
2948
2949 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
2950 for (; i < g_num_of_iommus; ) {
2951 if (min_agaw > g_iommus[i]->agaw)
2952 min_agaw = g_iommus[i]->agaw;
2953
2954 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
2955 }
2956
2957 return min_agaw;
2958}
2959
Weidong Han5e98c4b2008-12-08 23:03:27 +08002960static struct dmar_domain *iommu_alloc_vm_domain(void)
2961{
2962 struct dmar_domain *domain;
2963
2964 domain = alloc_domain_mem();
2965 if (!domain)
2966 return NULL;
2967
2968 domain->id = vm_domid++;
2969 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
2970 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
2971
2972 return domain;
2973}
2974
2975static int vm_domain_init(struct dmar_domain *domain, int guest_width)
2976{
2977 int adjust_width;
2978
2979 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
2980 spin_lock_init(&domain->mapping_lock);
2981 spin_lock_init(&domain->iommu_lock);
2982
2983 domain_reserve_special_ranges(domain);
2984
2985 /* calculate AGAW */
2986 domain->gaw = guest_width;
2987 adjust_width = guestwidth_to_adjustwidth(guest_width);
2988 domain->agaw = width_to_agaw(adjust_width);
2989
2990 INIT_LIST_HEAD(&domain->devices);
2991
2992 domain->iommu_count = 0;
2993 domain->iommu_coherency = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002994 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08002995
2996 /* always allocate the top pgd */
2997 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
2998 if (!domain->pgd)
2999 return -ENOMEM;
3000 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3001 return 0;
3002}
3003
3004static void iommu_free_vm_domain(struct dmar_domain *domain)
3005{
3006 unsigned long flags;
3007 struct dmar_drhd_unit *drhd;
3008 struct intel_iommu *iommu;
3009 unsigned long i;
3010 unsigned long ndomains;
3011
3012 for_each_drhd_unit(drhd) {
3013 if (drhd->ignored)
3014 continue;
3015 iommu = drhd->iommu;
3016
3017 ndomains = cap_ndoms(iommu->cap);
3018 i = find_first_bit(iommu->domain_ids, ndomains);
3019 for (; i < ndomains; ) {
3020 if (iommu->domains[i] == domain) {
3021 spin_lock_irqsave(&iommu->lock, flags);
3022 clear_bit(i, iommu->domain_ids);
3023 iommu->domains[i] = NULL;
3024 spin_unlock_irqrestore(&iommu->lock, flags);
3025 break;
3026 }
3027 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3028 }
3029 }
3030}
3031
3032static void vm_domain_exit(struct dmar_domain *domain)
3033{
3034 u64 end;
3035
3036	/* Domain 0 is reserved, so don't process it */
3037 if (!domain)
3038 return;
3039
3040 vm_domain_remove_all_dev_info(domain);
3041 /* destroy iovas */
3042 put_iova_domain(&domain->iovad);
3043 end = DOMAIN_MAX_ADDR(domain->gaw);
3044 end = end & (~VTD_PAGE_MASK);
3045
3046 /* clear ptes */
3047 dma_pte_clear_range(domain, 0, end);
3048
3049 /* free page tables */
3050 dma_pte_free_pagetable(domain, 0, end);
3051
3052 iommu_free_vm_domain(domain);
3053 free_domain_mem(domain);
3054}
3055
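/*
 * Glue between the generic struct iommu_ops API and the VM-domain
 * helpers above.  A hypothetical consumer (this is only an illustrative
 * sketch of the generic API, e.g. what KVM device assignment does)
 * would go through:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *
 * which lands in intel_iommu_domain_init(), intel_iommu_attach_device()
 * and intel_iommu_map_range() respectively.  attach_device checks that
 * the IOMMU's address width covers the domain's highest mapped address,
 * and map_range translates IOMMU_READ/WRITE/CACHE into
 * DMA_PTE_READ/WRITE/SNP before installing the page tables.
 */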
Joerg Roedel5d450802008-12-03 14:52:32 +01003056static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003057{
Joerg Roedel5d450802008-12-03 14:52:32 +01003058 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003059
Joerg Roedel5d450802008-12-03 14:52:32 +01003060 dmar_domain = iommu_alloc_vm_domain();
3061 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003062 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003063 "intel_iommu_domain_init: dmar_domain == NULL\n");
3064 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003065 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003066 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003067 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003068 "intel_iommu_domain_init() failed\n");
3069 vm_domain_exit(dmar_domain);
3070 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003071 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003072 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003073
Joerg Roedel5d450802008-12-03 14:52:32 +01003074 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003075}
Kay, Allen M38717942008-09-09 18:37:29 +03003076
Joerg Roedel5d450802008-12-03 14:52:32 +01003077static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003078{
Joerg Roedel5d450802008-12-03 14:52:32 +01003079 struct dmar_domain *dmar_domain = domain->priv;
3080
3081 domain->priv = NULL;
3082 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003083}
Kay, Allen M38717942008-09-09 18:37:29 +03003084
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003085static int intel_iommu_attach_device(struct iommu_domain *domain,
3086 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003087{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003088 struct dmar_domain *dmar_domain = domain->priv;
3089 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003090 struct intel_iommu *iommu;
3091 int addr_width;
3092 u64 end;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003093 int ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003094
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003095	/* normally pdev's context is not yet mapped */
3096 if (unlikely(domain_context_mapped(pdev))) {
3097 struct dmar_domain *old_domain;
3098
3099 old_domain = find_domain(pdev);
3100 if (old_domain) {
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003101 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003102 vm_domain_remove_one_dev_info(old_domain, pdev);
3103 else
3104 domain_remove_dev_info(old_domain);
3105 }
3106 }
3107
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003108 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
3109 if (!iommu)
3110 return -ENODEV;
3111
3112 /* check if this iommu agaw is sufficient for max mapped address */
3113 addr_width = agaw_to_width(iommu->agaw);
3114 end = DOMAIN_MAX_ADDR(addr_width);
3115 end = end & VTD_PAGE_MASK;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003116 if (end < dmar_domain->max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003117 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3118 "sufficient for the mapped address (%llx)\n",
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003119 __func__, iommu->agaw, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003120 return -EFAULT;
3121 }
3122
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003123 ret = domain_context_mapping(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003124 if (ret)
3125 return ret;
3126
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003127 ret = vm_domain_add_dev_info(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003128 return ret;
3129}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003130
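/*
 * Rough numbers for the agaw check above, assuming the usual
 * agaw_to_width() mapping of 30 + 9 * agaw bits: an IOMMU with agaw 2
 * covers 48 bits, so DOMAIN_MAX_ADDR(48) is 0xffffffffffff and, once
 * masked with VTD_PAGE_MASK, end is 0xfffffffff000.  The attach is
 * refused with -EFAULT whenever the domain already holds mappings whose
 * max_addr lies above that limit.
 */
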
static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	vm_domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}

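/*
 * Worked example (a sketch, not taken from an in-tree caller; 'pg' is a
 * placeholder page): mapping three pages at IOVA 0x100000 with
 * IOMMU_READ | IOMMU_WRITE becomes DMA_PTE_READ | DMA_PTE_WRITE in the
 * VT-d page tables, and if 0x100000 + VTD_PAGE_ALIGN(0x3000) = 0x103000
 * exceeds the old dmar_domain->max_addr, max_addr grows to 0x103000 once
 * the agaw check passes:
 *
 *	intel_iommu_map_range(domain, 0x100000, page_to_phys(pg),
 *			      3 * VTD_PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 */
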
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}

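/*
 * max_addr only shrinks when the unmapped range ends exactly at the
 * current top of the domain; punching a hole lower down leaves max_addr,
 * and therefore the agaw requirement checked on later attaches and maps,
 * unchanged.
 */
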
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

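/*
 * IOMMU_CAP_CACHE_COHERENCY reflects dmar_domain->iommu_snooping, i.e.
 * whether this domain can enforce DMA snooping; it is what allows a
 * caller to ask for IOMMU_CACHE mappings in intel_iommu_map_range()
 * above.
 */
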
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};

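/*
 * Minimal usage sketch, assuming the generic wrappers exported by
 * include/linux/iommu.h in this era (iommu_domain_alloc() and friends);
 * 'pdev' and 'pg' are placeholders and error handling is omitted:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (dom && iommu_attach_device(dom, &pdev->dev) == 0) {
 *		iommu_map_range(dom, 0x100000, page_to_phys(pg),
 *				VTD_PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 *		... device DMA to IOVA 0x100000 now reaches 'pg' ...
 *		iommu_unmap_range(dom, 0x100000, VTD_PAGE_SIZE);
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	if (dom)
 *		iommu_domain_free(dom);
 */
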
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
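/*
 * 0x2a40 is the memory controller hub of the Mobile 4 Series (GM45)
 * chipset named in the comment above; registering it as a header fixup
 * sets rwbf_quirk while the device is enumerated, so write-buffer
 * flushing is forced even though the chipset leaves its RWBF capability
 * bit clear.
 */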