/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#define PREFIX "DMAR: "

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL at the tail, so a scan of the list will find
         * it at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}
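
/*
 * Ordering sketch: because INCLUDE_ALL units sit at the tail, a walk
 * such as
 *
 *      list_for_each_entry(dmaru, &dmar_drhd_units, list)
 *              if (unit_covers(dmaru, dev))
 *                      return dmaru;
 *
 * (unit_covers() being a stand-in predicate here, not a real helper)
 * tries every device-specific unit before falling back to the catch-all
 * one; dmar_find_matched_drhd_unit() below relies on exactly this
 * ordering.
 */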

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR
                 * table; just ignore them.
                 */
                if (!bus) {
                        pr_warn(PREFIX "Device scope bus [%d] not found\n",
                                scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        pr_warn(PREFIX "Device scope device "
                                "[%04x:%02x:%02x.%02x] not found\n",
                                segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                pr_warn(PREFIX
                        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                        segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) ||
            (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
             !pdev->subordinate)) {
                pr_warn(PREFIX "Device scope type does not match for %s\n",
                        pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}
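
/*
 * Layout sketch of what the parser above consumes (see the ACPI DMAR
 * spec): a device scope entry is a small header followed by one
 * {device, function} path element per bus level, e.g.
 *
 *      struct acpi_dmar_device_scope   - entry_type, length, start bus
 *      struct acpi_dmar_pci_path [0]   - slot/function on scope->bus
 *      struct acpi_dmar_pci_path [1]   - slot/function behind that bridge
 *
 * which is why the loop descends through pdev->subordinate after each
 * path element.
 */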
119
Suresh Siddha318fe7d2011-08-23 17:05:20 -0700120int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
121 struct pci_dev ***devices, u16 segment)
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700122{
123 struct acpi_dmar_device_scope *scope;
124 void * tmp = start;
125 int index;
126 int ret;
127
128 *cnt = 0;
129 while (start < end) {
130 scope = start;
131 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
132 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
133 (*cnt)++;
Yinghai Lu5715f0f2010-04-08 19:58:22 +0100134 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400135 pr_warn(PREFIX "Unsupported device scope\n");
Yinghai Lu5715f0f2010-04-08 19:58:22 +0100136 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700137 start += scope->length;
138 }
139 if (*cnt == 0)
140 return 0;
141
142 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
143 if (!*devices)
144 return -ENOMEM;
145
146 start = tmp;
147 index = 0;
148 while (start < end) {
149 scope = start;
150 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
151 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
152 ret = dmar_parse_one_dev_scope(scope,
153 &(*devices)[index], segment);
154 if (ret) {
155 kfree(*devices);
156 return ret;
157 }
158 index ++;
159 }
160 start += scope->length;
161 }
162
163 return 0;
164}
165
166/**
167 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
168 * structure which uniquely represent one DMA remapping hardware unit
169 * present in the platform
170 */
171static int __init
172dmar_parse_one_drhd(struct acpi_dmar_header *header)
173{
174 struct acpi_dmar_hardware_unit *drhd;
175 struct dmar_drhd_unit *dmaru;
176 int ret = 0;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700177
David Woodhousee523b382009-04-10 22:27:48 -0700178 drhd = (struct acpi_dmar_hardware_unit *)header;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700179 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
180 if (!dmaru)
181 return -ENOMEM;
182
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700183 dmaru->hdr = header;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700184 dmaru->reg_base_addr = drhd->address;
David Woodhouse276dbf92009-04-04 01:45:37 +0100185 dmaru->segment = drhd->segment;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700186 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
187
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700188 ret = alloc_iommu(dmaru);
189 if (ret) {
190 kfree(dmaru);
191 return ret;
192 }
193 dmar_register_drhd_unit(dmaru);
194 return 0;
195}
196
David Woodhousef82851a2008-10-18 15:43:14 +0100197static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700198{
199 struct acpi_dmar_hardware_unit *drhd;
David Woodhousef82851a2008-10-18 15:43:14 +0100200 int ret = 0;
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700201
202 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
203
Yu Zhao2e824f72008-12-22 16:54:58 +0800204 if (dmaru->include_all)
205 return 0;
206
207 ret = dmar_parse_dev_scope((void *)(drhd + 1),
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700208 ((void *)drhd) + drhd->header.length,
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700209 &dmaru->devices_cnt, &dmaru->devices,
210 drhd->segment);
Suresh Siddha1c7d1bc2008-09-03 16:58:35 -0700211 if (ret) {
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700212 list_del(&dmaru->list);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700213 kfree(dmaru);
Suresh Siddha1886e8a2008-07-10 11:16:37 -0700214 }
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700215 return ret;
216}
217
David Woodhouseaa697072009-10-07 12:18:00 +0100218#ifdef CONFIG_ACPI_NUMA
Suresh Siddhaee34b322009-10-02 11:01:21 -0700219static int __init
220dmar_parse_one_rhsa(struct acpi_dmar_header *header)
221{
222 struct acpi_dmar_rhsa *rhsa;
223 struct dmar_drhd_unit *drhd;
224
225 rhsa = (struct acpi_dmar_rhsa *)header;
David Woodhouseaa697072009-10-07 12:18:00 +0100226 for_each_drhd_unit(drhd) {
Suresh Siddhaee34b322009-10-02 11:01:21 -0700227 if (drhd->reg_base_addr == rhsa->base_address) {
228 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
229
230 if (!node_online(node))
231 node = -1;
232 drhd->iommu->node = node;
David Woodhouseaa697072009-10-07 12:18:00 +0100233 return 0;
234 }
Suresh Siddhaee34b322009-10-02 11:01:21 -0700235 }
Ben Hutchingsfd0c8892010-04-03 19:38:43 +0100236 WARN_TAINT(
237 1, TAINT_FIRMWARE_WORKAROUND,
238 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
239 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
240 drhd->reg_base_addr,
241 dmi_get_system_info(DMI_BIOS_VENDOR),
242 dmi_get_system_info(DMI_BIOS_VERSION),
243 dmi_get_system_info(DMI_PRODUCT_VERSION));
Suresh Siddhaee34b322009-10-02 11:01:21 -0700244
David Woodhouseaa697072009-10-07 12:18:00 +0100245 return 0;
Suresh Siddhaee34b322009-10-02 11:01:21 -0700246}
David Woodhouseaa697072009-10-07 12:18:00 +0100247#endif
Suresh Siddhaee34b322009-10-02 11:01:21 -0700248
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700249static void __init
250dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
251{
252 struct acpi_dmar_hardware_unit *drhd;
253 struct acpi_dmar_reserved_memory *rmrr;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800254 struct acpi_dmar_atsr *atsr;
Roland Dreier17b60972009-09-24 12:14:00 -0700255 struct acpi_dmar_rhsa *rhsa;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700256
257 switch (header->type) {
258 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800259 drhd = container_of(header, struct acpi_dmar_hardware_unit,
260 header);
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400261 pr_info(PREFIX "DRHD base: %#016Lx flags: %#x\n",
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800262 (unsigned long long)drhd->address, drhd->flags);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700263 break;
264 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800265 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
266 header);
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400267 pr_info(PREFIX "RMRR base: %#016Lx end: %#016Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700268 (unsigned long long)rmrr->base_address,
269 (unsigned long long)rmrr->end_address);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700270 break;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800271 case ACPI_DMAR_TYPE_ATSR:
272 atsr = container_of(header, struct acpi_dmar_atsr, header);
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400273 pr_info(PREFIX "ATSR flags: %#x\n", atsr->flags);
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800274 break;
Roland Dreier17b60972009-09-24 12:14:00 -0700275 case ACPI_DMAR_HARDWARE_AFFINITY:
276 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400277 pr_info(PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
Roland Dreier17b60972009-09-24 12:14:00 -0700278 (unsigned long long)rhsa->base_address,
279 rhsa->proximity_domain);
280 break;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700281 }
282}
283
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700284/**
285 * dmar_table_detect - checks to see if the platform supports DMAR devices
286 */
287static int __init dmar_table_detect(void)
288{
289 acpi_status status = AE_OK;
290
291 /* if we could find DMAR table, then there are DMAR devices */
Yinghai Lu8e1568f2009-02-11 01:06:59 -0800292 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
293 (struct acpi_table_header **)&dmar_tbl,
294 &dmar_tbl_size);
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700295
296 if (ACPI_SUCCESS(status) && !dmar_tbl) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400297 pr_warn(PREFIX "Unable to map DMAR\n");
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700298 status = AE_NOT_FOUND;
299 }
300
301 return (ACPI_SUCCESS(status) ? 1 : 0);
302}
Suresh Siddhaaaa9d1d2008-07-10 11:16:38 -0700303
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700304/**
305 * parse_dmar_table - parses the DMA reporting table
306 */
307static int __init
308parse_dmar_table(void)
309{
310 struct acpi_table_dmar *dmar;
311 struct acpi_dmar_header *entry_header;
312 int ret = 0;
313
Yinghai Luf6dd5c32008-09-03 16:58:32 -0700314 /*
315 * Do it again, earlier dmar_tbl mapping could be mapped with
316 * fixed map.
317 */
318 dmar_table_detect();
319
Joseph Cihulaa59b50e2009-06-30 19:31:10 -0700320 /*
321 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
322 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
323 */
324 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
325
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700326 dmar = (struct acpi_table_dmar *)dmar_tbl;
327 if (!dmar)
328 return -ENODEV;
329
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700330 if (dmar->width < PAGE_SHIFT - 1) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400331 pr_warn(PREFIX "Invalid DMAR haw\n");
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700332 return -EINVAL;
333 }
334
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400335 pr_info(PREFIX "Host address width %d\n", dmar->width + 1);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700336
337 entry_header = (struct acpi_dmar_header *)(dmar + 1);
338 while (((unsigned long)entry_header) <
339 (((unsigned long)dmar) + dmar_tbl->length)) {
Tony Battersby084eb962009-02-11 13:24:19 -0800340 /* Avoid looping forever on bad ACPI tables */
341 if (entry_header->length == 0) {
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400342 pr_warn(PREFIX "Invalid 0-length structure\n");
Tony Battersby084eb962009-02-11 13:24:19 -0800343 ret = -EINVAL;
344 break;
345 }
346
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700347 dmar_table_print_dmar_entry(entry_header);
348
349 switch (entry_header->type) {
350 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
351 ret = dmar_parse_one_drhd(entry_header);
352 break;
353 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
354 ret = dmar_parse_one_rmrr(entry_header);
355 break;
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800356 case ACPI_DMAR_TYPE_ATSR:
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800357 ret = dmar_parse_one_atsr(entry_header);
Yu Zhaoaa5d2b52009-05-18 13:51:34 +0800358 break;
Roland Dreier17b60972009-09-24 12:14:00 -0700359 case ACPI_DMAR_HARDWARE_AFFINITY:
David Woodhouseaa697072009-10-07 12:18:00 +0100360#ifdef CONFIG_ACPI_NUMA
Suresh Siddhaee34b322009-10-02 11:01:21 -0700361 ret = dmar_parse_one_rhsa(entry_header);
David Woodhouseaa697072009-10-07 12:18:00 +0100362#endif
Roland Dreier17b60972009-09-24 12:14:00 -0700363 break;
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700364 default:
Donald Dutilebf947fcb2012-06-04 17:29:01 -0400365 pr_warn(PREFIX "Unknown DMAR structure type %d\n",
Roland Dreier4de75cf2009-09-24 01:01:29 +0100366 entry_header->type);
Keshavamurthy, Anil S10e52472007-10-21 16:41:41 -0700367 ret = 0; /* for forward compatibility */
368 break;
369 }
370 if (ret)
371 break;
372
373 entry_header = ((void *)entry_header + entry_header->length);
374 }
375 return ret;
376}
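
/*
 * For reference, the table walked above is laid out as
 *
 *      struct acpi_table_dmar    - signature, length, haw, flags
 *      struct acpi_dmar_header   - type, length   \  repeated until
 *          ...type-specific body...               /  dmar_tbl->length
 *
 * so parse_dmar_table() advances by entry_header->length at each step
 * and must treat a zero length as a fatal firmware bug to avoid
 * spinning forever.
 */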
377
Yinghaidda56542010-04-09 01:07:55 +0100378static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700379 struct pci_dev *dev)
380{
381 int index;
382
383 while (dev) {
384 for (index = 0; index < cnt; index++)
385 if (dev == devices[index])
386 return 1;
387
388 /* Check our parent */
389 dev = dev->bus->self;
390 }
391
392 return 0;
393}
394
395struct dmar_drhd_unit *
396dmar_find_matched_drhd_unit(struct pci_dev *dev)
397{
Yu Zhao2e824f72008-12-22 16:54:58 +0800398 struct dmar_drhd_unit *dmaru = NULL;
399 struct acpi_dmar_hardware_unit *drhd;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700400
Yinghaidda56542010-04-09 01:07:55 +0100401 dev = pci_physfn(dev);
402
Yu Zhao2e824f72008-12-22 16:54:58 +0800403 list_for_each_entry(dmaru, &dmar_drhd_units, list) {
404 drhd = container_of(dmaru->hdr,
405 struct acpi_dmar_hardware_unit,
406 header);
407
408 if (dmaru->include_all &&
409 drhd->segment == pci_domain_nr(dev->bus))
410 return dmaru;
411
412 if (dmar_pci_device_match(dmaru->devices,
413 dmaru->devices_cnt, dev))
414 return dmaru;
Suresh Siddhae61d98d2008-07-10 11:16:35 -0700415 }
416
417 return NULL;
418}
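
/*
 * Usage sketch (the real callers live in intel-iommu.c and the
 * interrupt-remapping code):
 *
 *      struct dmar_drhd_unit *dmaru = dmar_find_matched_drhd_unit(pdev);
 *      if (dmaru)
 *              iommu = dmaru->iommu;
 *
 * The pci_physfn() call above makes SR-IOV virtual functions resolve
 * through their physical function, since VFs do not appear in the ACPI
 * device scope tables.
 */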

int __init dmar_dev_scope_init(void)
{
        static int dmar_dev_scope_initialized;
        struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = -ENODEV;

        if (dmar_dev_scope_initialized)
                return dmar_dev_scope_initialized;

        if (list_empty(&dmar_drhd_units))
                goto fail;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        goto fail;
        }

        ret = dmar_parse_rmrr_atsr_dev();
        if (ret)
                goto fail;

        dmar_dev_scope_initialized = 1;
        return 0;

fail:
        dmar_dev_scope_initialized = ret;
        return ret;
}


int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        pr_info(PREFIX "Failed to parse the DMAR table\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                pr_info(PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

        return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        struct acpi_dmar_hardware_unit *drhd;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        entry_header = (struct acpi_dmar_header *)(dmar + 1);

        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        pr_warn(PREFIX "Invalid 0-length structure\n");
                        return 0;
                }

                if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
                        void __iomem *addr;
                        u64 cap, ecap;

                        drhd = (void *)entry_header;
                        if (!drhd->address) {
                                warn_invalid_dmar(0, "");
                                goto failed;
                        }

                        addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
                        if (!addr) {
                                pr_warn("IOMMU: can't validate: %llx\n",
                                        drhd->address);
                                goto failed;
                        }
                        cap = dmar_readq(addr + DMAR_CAP_REG);
                        ecap = dmar_readq(addr + DMAR_ECAP_REG);
                        early_iounmap(addr, VTD_PAGE_SIZE);
                        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                                warn_invalid_dmar(drhd->address,
                                                  " returns all ones");
                                goto failed;
                        }
                }

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return 1;

failed:
        return 0;
}

int __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();
        if (ret)
                ret = check_zero_address();
        {
                struct acpi_table_dmar *dmar;

                dmar = (struct acpi_table_dmar *) dmar_tbl;

                if (ret && irq_remapping_enabled && cpu_has_x2apic &&
                    dmar->flags & 0x1)
                        pr_info("Queued invalidation will be enabled to "
                                "support x2apic and Intr-remapping.\n");

                if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                        iommu_detected = 1;
                        /* Make sure ACS will be enabled */
                        pci_request_acs();
                }

#ifdef CONFIG_X86
                if (ret)
                        x86_init.iommu.iommu_init = intel_iommu_init;
#endif
        }
        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;

        return ret ? 1 : -ENODEV;
}


static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("IOMMU: can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("IOMMU: can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* The registers might span more than one page. */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("IOMMU: can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("IOMMU: can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}
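
/*
 * Worked example for the two-pass mapping above: if, say,
 * cap_max_fault_reg_offset() reports an offset beyond the first 4KiB,
 * the initial one-page mapping is torn down and the region is
 * re-reserved and remapped at the page-aligned size, so iommu->reg
 * always covers every register the capability fields advertise.
 */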

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("IOMMU: failed to map %s\n", iommu->name);
                goto error;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->seq_id,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        raw_spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

err_unmap:
        unmap_iommu(iommu);
error:
        kfree(iommu);
        return err;
}

void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

        free_dmar_iommu(iommu);

        if (iommu->reg)
                unmap_iommu(iommu);

        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}
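
/*
 * The invalidation queue is a fixed ring of QI_LENGTH descriptors:
 * free_head is where the next submission is written, free_tail is the
 * oldest slot not yet reclaimed, and free_cnt counts the free slots in
 * between.  Every submission consumes two slots (the real descriptor
 * plus a wait descriptor), so reclaim_free_desc() above may advance the
 * tail past several completed pairs at once.
 */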

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: "
                               "low=%llx, high=%llx\n",
                               (unsigned long long)qi->desc[index].low,
                               (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                               sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                            sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We leave the interrupts disabled, to prevent interrupt
                 * context from queueing another cmd while a cmd is already
                 * submitted and waiting for completion on this cpu. This is
                 * to avoid a deadlock where the interrupt context could wait
                 * indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}
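
/*
 * Submission sketch: a caller builds a two-quadword descriptor and
 * hands it off, e.g. for an IOTLB flush
 *
 *      struct qi_desc desc;
 *
 *      desc.low  = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
 *      desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_AM(order);
 *      qi_submit_sync(&desc, iommu);
 *
 * much as qi_flush_iotlb() below does; the wait descriptor appended by
 * qi_submit_sync() is what turns the asynchronous queue into a
 * synchronous call.
 */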

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}
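
/*
 * Size-encoding note for qi_flush_dev_iotlb() above: the BUG_ON checks
 * that addr is aligned to the 2^mask-page region, and the OR then fills
 * the low-order address bits with ones so that, per the VT-d
 * device-IOTLB descriptor format, the position of the lowest zero bit
 * encodes how many pages (2^mask) the invalidation covers.
 */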

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give the HW a chance to complete the pending invalidation
         * requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * Queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}
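
/*
 * Note on the enable sequence: __dmar_enable_qi() resets the software
 * ring, zeroes the hardware tail register, points DMAR_IQA_REG at the
 * descriptor page, sets DMA_GCMD_QIE and then spins via IOMMU_WAIT_OP
 * until the status register reports the queue as live;
 * dmar_reenable_qi() at the bottom of this file reuses the same helper
 * after a disable.
 */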

/* IOMMU interrupt handling. Most of this is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
};

static const char *irq_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

void dmar_msi_unmask(struct irq_data *data)
{
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
        unsigned long flag;

        /* unmask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
        unsigned long flag;
        struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

        /* mask it */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = irq_get_handler_data(irq);
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                       "INTR-REMAP:[fault reason %02d] %s\n",
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else
                pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                pr_err("DRHD: handling fault status reg %x\n", fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto clear_rest;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                  source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
        }
clear_rest:
        /* clear all the other faults */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        writel(fault_status, iommu->reg + DMAR_FSTS_REG);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}
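
/*
 * Fault record layout, as decoded above: each record occupies
 * PRIMARY_FAULT_REG_LEN (16) bytes of register space starting at
 * cap_fault_reg_offset():
 *
 *      +0   u64  faulting page address (via dma_frcd_page_addr())
 *      +8   u32  source-id (PCI requester id) in the low bits
 *      +12  u32  F bit, fault reason and request type
 *
 * and writing DMA_FRCD_F back at offset +12 retires the record.
 */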

int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }

        irq_set_handler_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                irq_set_handler_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;

        /*
         * Enable fault control interrupt.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_set_interrupt(iommu);

                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, "
                               "ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(iommu->irq, iommu);
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;
        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);