/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS by the BIOS via the DMA remapping reporting (DMAR)
 * ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#define PREFIX "DMAR: "

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * finds it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

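/*
 * Walk one ACPI device-scope entry: follow the PCI path it encodes,
 * starting from the scope's start bus on @segment, down to the endpoint
 * or bridge it names.  On success *dev holds a referenced pci_dev, or
 * NULL if the BIOS named a device that does not exist.
 */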
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			pr_warn(PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			pr_warn(PREFIX "Device scope device "
				"[%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn(PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn(PREFIX "Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

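/*
 * Parse a whole device-scope block: a first pass counts the endpoint
 * and bridge entries, a second pass resolves each one to a pci_dev via
 * dmar_parse_one_dev_scope().  Entries the BIOS listed but that do not
 * exist are left NULL in the resulting array.
 */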
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			pr_warn(PREFIX "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

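/*
 * Resolve the device scope of one DRHD to actual pci_devs.  INCLUDE_ALL
 * units carry no explicit scope; a unit whose scope fails to parse is
 * unlinked and freed.
 */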
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_ACPI_NUMA
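/*
 * Bind the IOMMU named by an RHSA entry to its NUMA node, so that
 * per-IOMMU allocations (e.g. the QI descriptor page) can be made
 * node-local.  An RHSA that matches no DRHD is reported as a firmware
 * bug.
 */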
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

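/* Log a one-line summary of each DMAR subtable as it is parsed. */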
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info(PREFIX "DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info(PREFIX "RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info(PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info(PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn(PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do the detection again: the earlier dmar_tbl mapping could
	 * have been made with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn(PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info(PREFIX "Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn(PREFIX "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn(PREFIX "Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

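/*
 * Return 1 if @dev or any bridge above it appears in @devices, i.e.
 * the device falls within that DRHD's explicit scope.
 */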
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

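/*
 * Map a PCI device (or its physical function, for VFs) to the DRHD
 * unit that covers it, either through an explicit scope entry or via
 * the segment's INCLUDE_ALL unit, which is kept at the list tail.
 */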
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

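/* Parse the DMAR table on the first call; repeat calls are no-ops. */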
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info(PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info(PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

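/*
 * Sanity-check every DRHD in the table before committing to it: a zero
 * register base, or capability registers that read back as all ones,
 * mean the BIOS entry is unusable.  Returns 1 if all units look sane,
 * 0 otherwise.
 */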
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn(PREFIX "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

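/*
 * Early boot probe: map the DMAR table, validate it, and if usable flag
 * the IOMMU as detected and hook intel_iommu_init() into the x86 init
 * path.  The early table mapping is dropped before returning;
 * parse_dmar_table() maps the table again later.
 */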
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *)dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to "
				"support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

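/*
 * Allocate and minimally initialise the intel_iommu for one DRHD: map
 * its registers, read CAP/ECAP, compute the (max) supported AGAW, and
 * remap if the register set spans more than one page.
 */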
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

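/*
 * Inspect the fault status register while waiting on descriptor @index.
 * An IQE on our descriptor is fatal (-EINVAL, after overwriting the bad
 * slot with the following wait descriptor); an ITE aborts the pending
 * waits and asks the caller to resubmit (-EAGAIN); an ICE is simply
 * cleared.
 */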
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

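/* Queue a context-cache invalidation of the given granularity. */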
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

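/*
 * Queue an IOTLB invalidation for @did covering 2^size_order pages at
 * @addr, requesting read/write drain when the hardware advertises the
 * capability.
 */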
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

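/*
 * Queue a device-IOTLB (ATS) invalidation for source-id @sid.  A
 * non-zero @mask encodes the size as an aligned power-of-two page
 * range; @qdep is the device's invalidate queue depth.
 */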
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);	/* GSTS is a 32-bit register */
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* IOMMU interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
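/*
 * Fault interrupt handler: walk the fault recording registers, log and
 * clear each recorded fault, then clear the summary bits in
 * DMAR_FSTS_REG.  The register lock is dropped around the logging.
 */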
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

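/*
 * Allocate and wire up the fault-reporting MSI for one IOMMU; safe to
 * call twice, the second call finds iommu->irq already set.
 */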
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);