/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA Remapping Reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA remapping and interrupt remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed: the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

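/*
 * dmar_parse_one_dev_scope - resolve one ACPI device scope entry to a pci_dev
 *
 * Walks the PCI path encoded in the scope entry (a start bus plus a chain of
 * device/function pairs, one per bridge level) and returns the pci_dev at the
 * end of that path, with a reference held.  Missing devices only produce a
 * warning, since some BIOSes describe hardware that is not populated.
 */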
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
			pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
			!pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
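
/*
 * dmar_parse_one_dev_scope - resolve every device scope entry in [start, end).
 * Two passes: the first counts the endpoint and bridge entries so the pci_dev
 * array can be sized, the second fills it in via dmar_parse_one_dev_scope().
 */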
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}


static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

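/*
 * ATSR (Root Port ATS Capability Reporting) structures describe which
 * PCIe root ports support Address Translation Services, i.e. under
 * which ports devices may maintain a device-side IOTLB.
 */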
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

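/*
 * dmar_find_matched_atsr_unit - return 1 if @dev sits below a root port
 * listed in (or covered by an INCLUDE_ALL) ATSR unit for its PCI segment,
 * 0 otherwise.  For SR-IOV virtual functions the lookup is done on the
 * physical function.
 */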
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

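/*
 * RHSA (Remapping Hardware Static Affinity) structures associate each
 * remapping unit with a NUMA proximity domain, so per-IOMMU state can
 * be allocated node-locally.
 */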
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/* Report the address from the RHSA entry itself; after the loop
	 * above, drhd no longer points at a valid unit. */
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		(unsigned long long)rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
			"DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
			"RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been done
	 * with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

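/*
 * dmar_find_matched_drhd_unit - return the DRHD unit whose device scope
 * covers @dev, either directly or via an upstream bridge.  The INCLUDE_ALL
 * unit is kept at the tail of the list, so units with an explicit scope
 * are matched first.
 */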
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

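/*
 * dmar_dev_scope_init - second-stage parsing: resolve the device scope of
 * every DRHD (and, with CONFIG_DMAR, every RMRR and ATSR) into pci_dev
 * pointers.  This must run after the PCI subsystem has enumerated the bus.
 */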
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

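/*
 * check_zero_address - sanity-check each DRHD before the IOMMU is trusted:
 * the register base must be non-zero and must not read back as all ones.
 * Returns 1 if the table looks usable; on failure DMA remapping is
 * disabled.  Runs early, so the registers are probed via early_ioremap().
 */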
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

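/*
 * detect_intel_iommu - early boot detection of VT-d hardware.  Maps and
 * validates the DMAR table, decides whether the IOMMU and/or queued
 * invalidation will be used, and (on x86) hooks the iommu_init path.
 * The early mapping is dropped at the end; parse_dmar_table() maps the
 * table again later.
 */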
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we disable DMA remapping when interrupt
		 * remapping is enabled.  Once support for queued
		 * invalidation of the IOTLB is added, this will no
		 * longer be needed.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

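/*
 * alloc_iommu - map the register set of one DRHD unit and initialize a
 * struct intel_iommu for it: read CAP/ECAP, compute the supported AGAWs,
 * and grow the mapping if the fault/IOTLB registers spill past one page.
 */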
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

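/*
 * qi_check_fault - examine DMAR_FSTS_REG for queue errors affecting the
 * descriptor at @index.  On an Invalidation Queue Error (IQE) the bad
 * descriptor is overwritten with the following wait descriptor and -EINVAL
 * is returned; on an Invalidation Time-out Error (ITE) all pending wait
 * descriptors are aborted and -EAGAIN tells the caller to resubmit.
 */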
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register to indicate the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

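/*
 * qi_flush_iotlb - queue an IOTLB invalidation for domain @did.
 * @size_order is the log2 number of pages to invalidate around @addr;
 * read/write draining is requested whenever the hardware advertises it.
 */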
| David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1012 | void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | 
 | 1013 | 		    unsigned int size_order, u64 type) | 
| Youquan Song | 3481f21 | 2008-10-16 16:31:55 -0700 | [diff] [blame] | 1014 | { | 
 | 1015 | 	u8 dw = 0, dr = 0; | 
 | 1016 |  | 
 | 1017 | 	struct qi_desc desc; | 
 | 1018 | 	int ih = 0; | 
 | 1019 |  | 
| Youquan Song | 3481f21 | 2008-10-16 16:31:55 -0700 | [diff] [blame] | 1020 | 	if (cap_write_drain(iommu->cap)) | 
 | 1021 | 		dw = 1; | 
 | 1022 |  | 
 | 1023 | 	if (cap_read_drain(iommu->cap)) | 
 | 1024 | 		dr = 1; | 
 | 1025 |  | 
 | 1026 | 	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) | 
 | 1027 | 		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; | 
 | 1028 | 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) | 
 | 1029 | 		| QI_IOTLB_AM(size_order); | 
 | 1030 |  | 
| David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1031 | 	qi_submit_sync(&desc, iommu); | 
| Youquan Song | 3481f21 | 2008-10-16 16:31:55 -0700 | [diff] [blame] | 1032 | } | 
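
/*
 * Illustrative usage sketch (not part of this file): size_order is the
 * address-mask value, so one call invalidates 2^size_order pages. A
 * page-selective flush of eight 4KB pages (32KB, suitably aligned) in
 * domain 'did' would look roughly like
 *
 *	qi_flush_iotlb(iommu, did, addr, 3, DMA_TLB_PSI_FLUSH);
 *
 * while a domain-selective flush passes size_order 0 and
 * DMA_TLB_DSI_FLUSH. The DMA_TLB_* granularities are assumed from
 * linux/intel-iommu.h.
 */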

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		/* addr is u64, so keep the shifts in 64-bit arithmetic */
		BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else {
		desc.high = QI_DEV_IOTLB_ADDR(addr);
	}

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
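
/*
 * Illustrative encoding note (sketch, not part of this file): 'mask'
 * selects a 2^mask-page range. The "addr |= ..." above sets every
 * address bit below bit (VTD_PAGE_SHIFT + mask - 1), while the BUG_ON
 * guarantees that bit itself is clear, so the device derives the range
 * size from the position of the lowest zero bit, as the ATS invalidate
 * format requires. For example, with mask == 1 (two 4KB pages at an
 * 8KB-aligned addr):
 *
 *	qi_flush_dev_iotlb(iommu, sid, qdep, addr, 1);
 *
 * sends addr with bits 11:0 set and bit 12 clear.
 */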

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* DMAR_GSTS_REG is a 32-bit register, so use a 32-bit read */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the hardware a chance to complete the pending invalidation
	 * requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is required to support
 * interrupt-remapping. It is also used by DMA-remapping, where it
 * replaces register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
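
/*
 * Illustrative bring-up sketch (not part of this file): callers such as
 * the interrupt-remapping setup code do roughly
 *
 *	if (dmar_enable_qi(iommu)) {
 *		printk(KERN_ERR "IOMMU: failed to enable queued invalidation\n");
 *		return -1;
 *	}
 *	qi_global_iec(iommu);
 *
 * i.e. enable the interface once per IOMMU and then flush the stale
 * interrupt entry cache. The exact error handling here is an
 * assumption; the real call sites live outside this file.
 */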

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};
static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	/* interrupt-remapping fault reasons are numbered from 0x20 up */
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read back the register to flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read back the register to flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
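/*
 * Layout note (summarized from the VT-d fault recording register
 * format; the dma_frcd_* helpers in linux/intel-iommu.h extract the
 * individual fields): each 16-byte record is read piecewise below as
 *
 *	offset +0,  64 bits: faulting page address
 *	offset +8,  32 bits: source id in the low 16 bits
 *	offset +12, 32 bits: F flag (bit 31), type (bit 30) and
 *			     fault reason (bits 7:0)
 *
 * Writing DMA_FRCD_F back to offset +12 clears the record.
 */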
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
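
/*
 * Illustrative sketch (not part of this file): a resume-from-suspend
 * path would re-enable queued invalidation on every IOMMU that had it
 * set up before suspend, roughly as
 *
 *	for_each_drhd_unit(drhd) {
 *		struct intel_iommu *iommu = drhd->iommu;
 *
 *		if (iommu->qi)
 *			dmar_reenable_qi(iommu);
 *	}
 *
 * The loop shape is an assumption; the real suspend/resume code lives
 * with the DMA-remapping driver.
 */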

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	/* bit 0 of the DMAR table flags indicates INTR_REMAP support */
	return dmar->flags & 0x1;
}