blob: 1ddcfe5ef353cc0e8827d11affa6219331204cfa [file] [log] [blame]
/*
 * Dynamic DMA mapping support.
 */
4
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>
#include <asm/machvec.h>
14
Fenghua Yu62fdd762008-10-17 12:14:13 -070015
Suresh Siddhad3f13812011-08-23 17:05:25 -070016#ifdef CONFIG_INTEL_IOMMU
Fenghua Yu62fdd762008-10-17 12:14:13 -070017
18#include <linux/kernel.h>
Fenghua Yu62fdd762008-10-17 12:14:13 -070019
20#include <asm/page.h>
Fenghua Yu62fdd762008-10-17 12:14:13 -070021
/* Sentinel DMA address handed back when a mapping attempt fails;
   exported so drivers/modules can compare against it. */
dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* When nonzero, iommu_dma_supported() refuses DAC for devices with
   masks >= 40 bits, steering them to single-address-cycle (SAC). */
static int iommu_sac_force __read_mostly;

/* Nonzero when the IOMMU is not used. */
int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
/* Debug builds force IOMMU usage by default. */
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif

/* NOTE(review): presumably selects IOMMU pass-through (identity)
   mapping; set elsewhere — confirm against the boot-option parser. */
int iommu_pass_through;
35
/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to i386. */
struct device fallback_dev = {
	.init_name = "fallback device",
	/* 32-bit coherent mask; dma_mask aliases it so both limits agree. */
	.coherent_dma_mask = DMA_BIT_MASK(32),
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* DMA operations provided by the Intel IOMMU driver (defined elsewhere). */
extern struct dma_map_ops intel_dma_ops;
Fenghua Yu62fdd762008-10-17 12:14:13 -070046
47static int __init pci_iommu_init(void)
48{
49 if (iommu_detected)
50 intel_iommu_init();
51
52 return 0;
53}
54
55/* Must execute after PCI subsystem */
56fs_initcall(pci_iommu_init);
57
/* Nothing to tear down on this platform; provided so generic code
   has a symbol to call at shutdown. */
void pci_iommu_shutdown(void)
{
}
62
63void __init
64iommu_dma_init(void)
65{
66 return;
67}
68
Fenghua Yu62fdd762008-10-17 12:14:13 -070069int iommu_dma_supported(struct device *dev, u64 mask)
70{
Fenghua Yu62fdd762008-10-17 12:14:13 -070071 /* Copied from i386. Doesn't make much sense, because it will
72 only work for pci_alloc_coherent.
73 The caller just has to use GFP_DMA in this case. */
Yang Hongyang2f4f27d2009-04-06 19:01:18 -070074 if (mask < DMA_BIT_MASK(24))
Fenghua Yu62fdd762008-10-17 12:14:13 -070075 return 0;
76
77 /* Tell the device to use SAC when IOMMU force is on. This
78 allows the driver to use cheaper accesses in some cases.
79
80 Problem with this is that if we overflow the IOMMU area and
81 return DAC as fallback address the device may not handle it
82 correctly.
83
84 As a special case some controllers have a 39bit address
85 mode that is as efficient as 32bit (aic79xx). Don't force
86 SAC for these. Assume all masks <= 40 bits are of this
87 type. Normally this doesn't make any difference, but gives
88 more gentle handling of IOMMU overflow. */
Yang Hongyang50cf1562009-04-06 19:01:14 -070089 if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
Matthew Wilcoxe088a4a2009-05-22 13:49:49 -070090 dev_info(dev, "Force SAC with mask %llx\n", mask);
Fenghua Yu62fdd762008-10-17 12:14:13 -070091 return 0;
92 }
93
94 return 1;
95}
96EXPORT_SYMBOL(iommu_dma_supported);
97
/*
 * Early-boot setup of the global DMA operations: install the Intel
 * IOMMU ops, patch in the machine-vector sync hooks, then run IOMMU
 * detection (and SWIOTLB init when configured) in that order.
 */
void __init pci_iommu_alloc(void)
{
	/* Make the Intel IOMMU implementation the global dma_ops. */
	dma_ops = &intel_dma_ops;

	/* Replace the sync callbacks with the machine-vector versions;
	   single and sg directions share the same machvec helpers. */
	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
	dma_ops->sync_single_for_device = machvec_dma_sync_single;
	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
	dma_ops->dma_supported = iommu_dma_supported;

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
118
Fenghua Yu62fdd762008-10-17 12:14:13 -0700119#endif