#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

static int disable_intremap;
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
	struct irq_2_iommu *iommu;
	int node;

	node = cpu_to_node(cpu);

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);

	return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * alloc irq desc if not allocated already.
	 */
	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

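/*
 * Return the irq_2_iommu mapping for @irq, but only if an IRTE has
 * actually been assigned to it (i.e. irq_iommu->iommu is set).
 */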
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

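/* Copy the IRTE backing @irq into @entry; returns -1 if irq is not remapped. */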
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

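/*
 * Allocate @count consecutive free entries (rounded up to a power of two)
 * in @iommu's interrupt remapping table, mark them present and record the
 * iommu, start index and mask in @irq's irq_2_iommu data.  Returns the
 * start index on success, -1 on failure.
 */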
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

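/*
 * Issue a selective Interrupt Entry Cache invalidation for @index/@mask
 * through the queued invalidation interface and wait for it to complete.
 */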
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

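/* Return the IRTE base index for @irq and report its offset via @sub_handle. */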
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

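/*
 * Associate @irq with an already-allocated IRTE block: record the iommu,
 * the block's base @index and the @subhandle (offset within the block).
 */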
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_2_iommu(irq)->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

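/*
 * Overwrite the low 64 bits of the IRTE backing @irq with
 * @irte_modified->low, flush the cache line and invalidate the Interrupt
 * Entry Cache so the hardware picks up the new entry.
 */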
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

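/* Invalidate the Interrupt Entry Cache for all IRTEs backing @irq. */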
int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

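/*
 * Look up the interrupt-remapping hardware unit that serves a given
 * IO-APIC id or PCI device, respectively.
 */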
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

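/*
 * Release the IRTE(s) used by @irq.  Only the owner of the allocation
 * (sub_handle == 0) clears the hardware entries and flushes the IEC;
 * in all cases the irq_2_iommu mapping is reset.
 */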
int free_irte(int irq)
{
	int rc = 0;
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

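/*
 * Point the hardware at the interrupt remapping table and turn remapping
 * on: program DMAR_IRTA_REG, latch the table pointer with SIRTP, issue a
 * global IEC invalidation, then set the IRE bit in the global command
 * register.
 */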
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

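/*
 * Allocate the interrupt remapping table for @iommu and hand it to the
 * hardware via iommu_set_intr_remapping().
 */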
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

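/*
 * Interrupt remapping is usable only if it was not disabled on the
 * command line ("nointremap") and every DRHD unit advertises IR
 * capability.
 */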
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

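/*
 * Bring up interrupt remapping on every DRHD unit: tear down any state
 * left enabled prior to OS handover, enable queued invalidation, then
 * allocate and activate a remapping table per unit.
 */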
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * don't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * Check for Interrupt-remapping support.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

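/*
 * Walk the device scope entries of a DRHD and record which IO-APICs are
 * covered by @iommu in the ir_ioapic[] table.
 */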
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Find the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

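/*
 * Re-enable interrupt remapping after a prior disable_intr_remapping()
 * (e.g. across suspend/resume): restart queued invalidation where it was
 * in use, then reprogram every IR-capable unit with its existing table.
 */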
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}