blob: 87ec390e10df3f21ff1f7e671e7ef5d553c45e83 [file] [log] [blame]
Yinghai Lu5aeecaf2008-08-19 20:49:59 -07001#include <linux/interrupt.h>
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -07002#include <linux/dmar.h>
Suresh Siddha2ae21012008-07-10 11:16:43 -07003#include <linux/spinlock.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09004#include <linux/slab.h>
Suresh Siddha2ae21012008-07-10 11:16:43 -07005#include <linux/jiffies.h>
Suresh Siddha20f30972009-08-04 12:07:08 -07006#include <linux/hpet.h>
Suresh Siddha2ae21012008-07-10 11:16:43 -07007#include <linux/pci.h>
Suresh Siddhab6fcb332008-07-10 11:16:44 -07008#include <linux/irq.h>
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -07009#include <asm/io_apic.h>
Yinghai Lu17483a12008-12-12 13:14:18 -080010#include <asm/smp.h>
Jaswinder Singh Rajput6d652ea2009-01-07 21:38:59 +053011#include <asm/cpu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030012#include <linux/intel-iommu.h>
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -070013#include "intr_remapping.h"
Alexander Beregalov46f06b722009-04-06 16:45:28 +010014#include <acpi/acpi.h>
Weidong Hanf007e992009-05-23 00:41:15 +080015#include <asm/pci-direct.h>
16#include "pci.h"
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -070017
18static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
Suresh Siddha20f30972009-08-04 12:07:08 -070019static struct hpet_scope ir_hpet[MAX_HPET_TBS];
20static int ir_ioapic_num, ir_hpet_num;
Suresh Siddha2ae21012008-07-10 11:16:43 -070021int intr_remapping_enabled;
22
Weidong Han03ea8152009-04-17 16:42:15 +080023static int disable_intremap;
Chris Wrightd1423d52010-07-20 11:06:49 -070024static int disable_sourceid_checking;
25
/* Boot parameter "nointremap": unconditionally disable interrupt remapping. */
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);
32
Chris Wrightd1423d52010-07-20 11:06:49 -070033static __init int setup_intremap(char *str)
34{
35 if (!str)
36 return -EINVAL;
37
38 if (!strncmp(str, "on", 2))
39 disable_intremap = 0;
40 else if (!strncmp(str, "off", 3))
41 disable_intremap = 1;
42 else if (!strncmp(str, "nosid", 5))
43 disable_sourceid_checking = 1;
44
45 return 0;
46}
47early_param("intremap", setup_intremap);
48
/*
 * Per-IRQ bookkeeping linking an interrupt to the IRTE block backing it
 * in an IOMMU's interrupt-remapping table.
 */
struct irq_2_iommu {
	struct intel_iommu *iommu;	/* owning IOMMU; NULL when unmapped */
	u16 irte_index;			/* base index of the allocated IRTE block */
	u16 sub_handle;			/* offset within the block (MSI sub-handles) */
	u8 irte_mask;			/* log2 of block size set by alloc_irte() */
};
55
Yinghai Lud7e51e62009-01-07 15:03:13 -080056#ifdef CONFIG_GENERIC_HARDIRQS
Yinghai Lu85ac16d2009-04-27 18:00:38 -070057static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080058{
59 struct irq_2_iommu *iommu;
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080060
61 iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
Yinghai Lu85ac16d2009-04-27 18:00:38 -070062 printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080063
64 return iommu;
65}
Yinghai Lue420dfb2008-08-19 20:50:21 -070066
67static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
68{
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080069 struct irq_desc *desc;
70
71 desc = irq_to_desc(irq);
72
73 if (WARN_ON_ONCE(!desc))
74 return NULL;
75
76 return desc->irq_2_iommu;
77}
78
Yinghai Lu70590ea2009-08-26 16:21:54 -070079static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080080{
81 struct irq_desc *desc;
82 struct irq_2_iommu *irq_iommu;
83
Yinghai Lu70590ea2009-08-26 16:21:54 -070084 desc = irq_to_desc(irq);
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080085 if (!desc) {
86 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
87 return NULL;
88 }
89
90 irq_iommu = desc->irq_2_iommu;
91
92 if (!irq_iommu)
Yinghai Lu70590ea2009-08-26 16:21:54 -070093 desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -080094
95 return desc->irq_2_iommu;
Yinghai Lue420dfb2008-08-19 20:50:21 -070096}
Thomas Gleixnerd6c88a52008-10-15 15:27:23 +020097
Thomas Gleixner0e1e3672010-10-04 16:20:16 +020098static void irq_2_iommu_free(unsigned int irq)
99{
100 struct irq_data *d = irq_get_irq_data(irq);
101 struct irq_2_iommu *p = d->irq_2_iommu;
102
103 d->irq_2_iommu = NULL;
104 kfree(p);
105}
106
#else /* !CONFIG_GENERIC_HARDIRQS */
108
/* Fallback: one statically allocated entry per possible IRQ. */
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	/* Static table: "allocation" is just the lookup. */
	return irq_2_iommu(irq);
}

/* Nothing to free in the static-table configuration. */
static void irq_2_iommu_free(unsigned int irq) { }
124
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -0800125#endif
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700126
127static DEFINE_SPINLOCK(irq_2_ir_lock);
128
Yinghai Lue420dfb2008-08-19 20:50:21 -0700129static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
130{
131 struct irq_2_iommu *irq_iommu;
132
133 irq_iommu = irq_2_iommu(irq);
134
135 if (!irq_iommu)
136 return NULL;
137
138 if (!irq_iommu->iommu)
139 return NULL;
140
141 return irq_iommu;
142}
143
/* Nonzero iff @irq is currently routed through interrupt remapping. */
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) ? 1 : 0;
}
148
149int get_irte(int irq, struct irte *entry)
150{
151 int index;
Yinghai Lue420dfb2008-08-19 20:50:21 -0700152 struct irq_2_iommu *irq_iommu;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700153 unsigned long flags;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700154
Yinghai Lue420dfb2008-08-19 20:50:21 -0700155 if (!entry)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700156 return -1;
157
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700158 spin_lock_irqsave(&irq_2_ir_lock, flags);
Yinghai Lue420dfb2008-08-19 20:50:21 -0700159 irq_iommu = valid_irq_2_iommu(irq);
160 if (!irq_iommu) {
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700161 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700162 return -1;
163 }
164
Yinghai Lue420dfb2008-08-19 20:50:21 -0700165 index = irq_iommu->irte_index + irq_iommu->sub_handle;
166 *entry = *(irq_iommu->iommu->ir_table->base + index);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700167
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700168 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700169 return 0;
170}
171
/*
 * Allocate a block of @count IRTEs for @irq in @iommu's remapping table.
 *
 * @count is rounded up to a power of two and the whole block must be a
 * contiguous run of non-present entries.  On success, @irq's irq_2_iommu
 * mapping is initialized (sub_handle 0, irte_mask = log2(count)) and the
 * base index is returned; any failure returns -1.
 */
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	/* HW limits how many low index bits a sub-handle may occupy. */
	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		/* Is the run [index, index + count) entirely free? */
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		/* Wrapped all the way around: table is full. */
		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	/* Claim the block before dropping the lock. */
	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
245
Yu Zhao704126a2009-01-04 16:28:52 +0800246static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700247{
248 struct qi_desc desc;
249
250 desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
251 | QI_IEC_SELECTIVE;
252 desc.high = 0;
253
Yu Zhao704126a2009-01-04 16:28:52 +0800254 return qi_submit_sync(&desc, iommu);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700255}
256
257int map_irq_to_irte_handle(int irq, u16 *sub_handle)
258{
259 int index;
Yinghai Lue420dfb2008-08-19 20:50:21 -0700260 struct irq_2_iommu *irq_iommu;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700261 unsigned long flags;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700262
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700263 spin_lock_irqsave(&irq_2_ir_lock, flags);
Yinghai Lue420dfb2008-08-19 20:50:21 -0700264 irq_iommu = valid_irq_2_iommu(irq);
265 if (!irq_iommu) {
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700266 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700267 return -1;
268 }
269
Yinghai Lue420dfb2008-08-19 20:50:21 -0700270 *sub_handle = irq_iommu->sub_handle;
271 index = irq_iommu->irte_index;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700272 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700273 return index;
274}
275
276int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
277{
Yinghai Lue420dfb2008-08-19 20:50:21 -0700278 struct irq_2_iommu *irq_iommu;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700279 unsigned long flags;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700280
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700281 spin_lock_irqsave(&irq_2_ir_lock, flags);
Suresh Siddha7ddfb652008-08-20 17:22:51 -0700282
283 irq_iommu = irq_2_iommu_alloc(irq);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700284
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -0800285 if (!irq_iommu) {
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700286 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Yinghai Lu0b8f1ef2008-12-05 18:58:31 -0800287 printk(KERN_ERR "can't allocate irq_2_iommu\n");
288 return -1;
289 }
290
Yinghai Lue420dfb2008-08-19 20:50:21 -0700291 irq_iommu->iommu = iommu;
292 irq_iommu->irte_index = index;
293 irq_iommu->sub_handle = subhandle;
294 irq_iommu->irte_mask = 0;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700295
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700296 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700297
298 return 0;
299}
300
301int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
302{
Yinghai Lue420dfb2008-08-19 20:50:21 -0700303 struct irq_2_iommu *irq_iommu;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700304 unsigned long flags;
Yinghai Lue420dfb2008-08-19 20:50:21 -0700305
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700306 spin_lock_irqsave(&irq_2_ir_lock, flags);
Yinghai Lue420dfb2008-08-19 20:50:21 -0700307 irq_iommu = valid_irq_2_iommu(irq);
308 if (!irq_iommu) {
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700309 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700310 return -1;
311 }
312
Yinghai Lue420dfb2008-08-19 20:50:21 -0700313 irq_iommu->iommu = NULL;
314 irq_iommu->irte_index = 0;
315 irq_iommu->sub_handle = 0;
316 irq_2_iommu(irq)->irte_mask = 0;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700317
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700318 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700319
320 return 0;
321}
322
/*
 * Overwrite the IRTE backing @irq with @irte_modified, flush the entry
 * out of the CPU cache and invalidate the IOMMU's interrupt entry cache.
 * Returns the qi_flush_iec() result, or -1 if @irq has no valid IRTE.
 */
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	/* Update each 64-bit half atomically so hardware never sees a torn entry. */
	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	/* Invalidate the IOMMU's cached copy of this entry before unlocking. */
	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
353
354int flush_irte(int irq)
355{
Yu Zhao704126a2009-01-04 16:28:52 +0800356 int rc;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700357 int index;
358 struct intel_iommu *iommu;
Yinghai Lue420dfb2008-08-19 20:50:21 -0700359 struct irq_2_iommu *irq_iommu;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700360 unsigned long flags;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700361
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700362 spin_lock_irqsave(&irq_2_ir_lock, flags);
Yinghai Lue420dfb2008-08-19 20:50:21 -0700363 irq_iommu = valid_irq_2_iommu(irq);
364 if (!irq_iommu) {
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700365 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700366 return -1;
367 }
368
Yinghai Lue420dfb2008-08-19 20:50:21 -0700369 iommu = irq_iommu->iommu;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700370
Yinghai Lue420dfb2008-08-19 20:50:21 -0700371 index = irq_iommu->irte_index + irq_iommu->sub_handle;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700372
Yu Zhao704126a2009-01-04 16:28:52 +0800373 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700374 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700375
Yu Zhao704126a2009-01-04 16:28:52 +0800376 return rc;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700377}
378
Suresh Siddha20f30972009-08-04 12:07:08 -0700379struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
380{
381 int i;
382
383 for (i = 0; i < MAX_HPET_TBS; i++)
384 if (ir_hpet[i].id == hpet_id)
385 return ir_hpet[i].iommu;
386 return NULL;
387}
388
Suresh Siddha89027d32008-07-10 11:16:56 -0700389struct intel_iommu *map_ioapic_to_ir(int apic)
390{
391 int i;
392
393 for (i = 0; i < MAX_IO_APICS; i++)
394 if (ir_ioapic[i].id == apic)
395 return ir_ioapic[i].iommu;
396 return NULL;
397}
398
Suresh Siddha75c46fa2008-07-10 11:16:57 -0700399struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
400{
401 struct dmar_drhd_unit *drhd;
402
403 drhd = dmar_find_matched_drhd_unit(dev);
404 if (!drhd)
405 return NULL;
406
407 return drhd->iommu;
408}
409
/*
 * Zero the whole IRTE block owned by @irq_iommu and invalidate the
 * IOMMU's interrupt entry cache for it.  Sub-handle users share a block
 * whose owner has sub_handle == 0, so non-owners are a no-op here.
 */
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	/* Block spans 2^irte_mask consecutive entries starting at index. */
	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		/* 64-bit stores keep each half consistent for concurrent HW reads. */
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
432
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700433int free_irte(int irq)
434{
Yu Zhao704126a2009-01-04 16:28:52 +0800435 int rc = 0;
Yinghai Lue420dfb2008-08-19 20:50:21 -0700436 struct irq_2_iommu *irq_iommu;
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700437 unsigned long flags;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700438
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700439 spin_lock_irqsave(&irq_2_ir_lock, flags);
Yinghai Lue420dfb2008-08-19 20:50:21 -0700440 irq_iommu = valid_irq_2_iommu(irq);
441 if (!irq_iommu) {
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700442 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700443 return -1;
444 }
445
Weidong Hanc4658b42009-05-23 00:41:14 +0800446 rc = clear_entries(irq_iommu);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700447
Yinghai Lue420dfb2008-08-19 20:50:21 -0700448 irq_iommu->iommu = NULL;
449 irq_iommu->irte_index = 0;
450 irq_iommu->sub_handle = 0;
451 irq_iommu->irte_mask = 0;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700452
Suresh Siddha4c5502b2009-03-16 17:04:53 -0700453 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700454
Thomas Gleixner0e1e3672010-10-04 16:20:16 +0200455 irq_2_iommu_free(irq);
456
Yu Zhao704126a2009-01-04 16:28:52 +0800457 return rc;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700458}
459
Weidong Hanf007e992009-05-23 00:41:15 +0800460/*
461 * source validation type
462 */
463#define SVT_NO_VERIFY 0x0 /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
465#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
466
467/*
468 * source-id qualifier
469 */
470#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
471#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
472 * the third least significant bit
473 */
474#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
475 * the second and third least significant bits
476 */
477#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
478 * the least three significant bits
479 */
480
481/*
482 * set SVT, SQ and SID fields of irte to verify
483 * source ids of interrupt requests
484 */
485static void set_irte_sid(struct irte *irte, unsigned int svt,
486 unsigned int sq, unsigned int sid)
487{
Chris Wrightd1423d52010-07-20 11:06:49 -0700488 if (disable_sourceid_checking)
489 svt = SVT_NO_VERIFY;
Weidong Hanf007e992009-05-23 00:41:15 +0800490 irte->svt = svt;
491 irte->sq = sq;
492 irte->sid = sid;
493}
494
495int set_ioapic_sid(struct irte *irte, int apic)
496{
497 int i;
498 u16 sid = 0;
499
500 if (!irte)
501 return -1;
502
503 for (i = 0; i < MAX_IO_APICS; i++) {
504 if (ir_ioapic[i].id == apic) {
505 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
506 break;
507 }
508 }
509
510 if (sid == 0) {
511 pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
512 return -1;
513 }
514
515 set_irte_sid(irte, 1, 0, sid);
516
517 return 0;
518}
519
Suresh Siddha20f30972009-08-04 12:07:08 -0700520int set_hpet_sid(struct irte *irte, u8 id)
521{
522 int i;
523 u16 sid = 0;
524
525 if (!irte)
526 return -1;
527
528 for (i = 0; i < MAX_HPET_TBS; i++) {
529 if (ir_hpet[i].id == id) {
530 sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
531 break;
532 }
533 }
534
535 if (sid == 0) {
536 pr_warning("Failed to set source-id of HPET block (%d)\n", id);
537 return -1;
538 }
539
540 /*
541 * Should really use SQ_ALL_16. Some platforms are broken.
542 * While we figure out the right quirks for these broken platforms, use
543 * SQ_13_IGNORE_3 for now.
544 */
545 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
546
547 return 0;
548}
549
/*
 * Program @irte's source-id fields for MSIs originating from @dev.
 *
 * PCIe devices (and Root-Complex-integrated PCI devices) are verified
 * against their full requester id.  Behind a PCIe-to-PCI/PCI-X bridge
 * only the bus is verified, since the bridge may own the transaction;
 * behind a legacy PCI bridge the bridge's own requester id is used.
 */
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	/* NOTE(review): if no upstream bridge is found the irte is left
	 * unprogrammed yet 0 is returned — presumably deliberate
	 * best-effort; confirm against callers. */
	return 0;
}
576
/*
 * Point @iommu at its interrupt-remapping table and enable remapping.
 *
 * Sequence: program IRTA (table address, x2apic @mode, size), latch it
 * with GCMD.SIRTP, globally invalidate the interrupt entry cache, then
 * set GCMD.IRE.  Each GCMD write is polled for completion via GSTS.
 */
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
615
616
617static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
618{
619 struct ir_table *ir_table;
620 struct page *pages;
621
622 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
Suresh Siddhafa4b57c2009-03-16 17:05:05 -0700623 GFP_ATOMIC);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700624
625 if (!iommu->ir_table)
626 return -ENOMEM;
627
Suresh Siddha824cd752009-10-02 11:01:23 -0700628 pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
629 INTR_REMAP_PAGE_ORDER);
Suresh Siddha2ae21012008-07-10 11:16:43 -0700630
631 if (!pages) {
632 printk(KERN_ERR "failed to allocate pages of order %d\n",
633 INTR_REMAP_PAGE_ORDER);
634 kfree(iommu->ir_table);
635 return -ENOMEM;
636 }
637
638 ir_table->base = page_address(pages);
639
640 iommu_set_intr_remapping(iommu, mode);
641 return 0;
642}
643
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	/* Nothing to do on hardware without remapping support. */
	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Already disabled?  Skip the GCMD write entirely. */
	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Wait until hardware reports remapping is off. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
676
Weidong Han93758232009-04-17 16:42:14 +0800677int __init intr_remapping_supported(void)
678{
679 struct dmar_drhd_unit *drhd;
680
Weidong Han03ea8152009-04-17 16:42:15 +0800681 if (disable_intremap)
682 return 0;
683
Youquan Song074835f2009-09-09 12:05:39 -0400684 if (!dmar_ir_support())
685 return 0;
686
Weidong Han93758232009-04-17 16:42:14 +0800687 for_each_drhd_unit(drhd) {
688 struct intel_iommu *iommu = drhd->iommu;
689
690 if (!ecap_ir_support(iommu->ecap))
691 return 0;
692 }
693
694 return 1;
695}
696
/*
 * Enable interrupt remapping on every capable DRHD unit.
 *
 * Multi-pass over the DRHD list: (1) quiesce units handed over from the
 * BIOS/previous kernel (clear faults, disable remapping and QI) unless
 * QI is already ours; (2) verify IR (and EIM when @eim) support;
 * (3) enable queued invalidation everywhere; (4) set up remap tables.
 * Returns 0 on success, -1 on any failure.
 */
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enable interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	/* Fail if no unit actually got remapping enabled. */
	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700791
/*
 * Record one HPET device-scope entry from the DMAR table into ir_hpet[],
 * resolving the scope's PCI path to a final (bus, devfn) by walking
 * bridges with direct config-space reads (PCI core is not up yet).
 */
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	/* Walk all but the last path element; the loop leaves @path on
	 * the final (device) element used below. */
	while (--count > 0) {
		/*
		 * Access PCI directly due to the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	/* Caller guarantees ir_hpet_num < MAX_HPET_TBS (checked in
	 * ir_parse_ioapic_hpet_scope()). */
	ir_hpet_num++;
}
819
/*
 * Record one IO-APIC device-scope entry from the DMAR table into
 * ir_ioapic[], resolving the scope's PCI path to a final (bus, devfn)
 * by walking bridges with direct config-space reads.
 */
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	/* Walk all but the last path element; the loop leaves @path on
	 * the final (device) element used below. */
	while (--count > 0) {
		/*
		 * Access PCI directly due to the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	/* Caller guarantees ir_ioapic_num < MAX_IO_APICS. */
	ir_ioapic_num++;
}
848
/*
 * Walk one DRHD's device-scope list and record every IO-APIC and HPET
 * scope under @iommu.  Returns -1 when the fixed ir_ioapic[]/ir_hpet[]
 * tables would overflow, 0 otherwise.
 */
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	/* Scope entries are packed directly after the DRHD header. */
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		/* Entries are variable length: advance by each one's length. */
		start += scope->length;
	}

	return 0;
}
891
892/*
893 * Finds the assocaition between IOAPIC's and its Interrupt-remapping
894 * hardware unit.
895 */
896int __init parse_ioapics_under_ir(void)
897{
898 struct dmar_drhd_unit *drhd;
899 int ir_supported = 0;
900
901 for_each_drhd_unit(drhd) {
902 struct intel_iommu *iommu = drhd->iommu;
903
904 if (ecap_ir_support(iommu->ecap)) {
Suresh Siddha20f30972009-08-04 12:07:08 -0700905 if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
Suresh Siddhaad3ad3f2008-07-10 11:16:40 -0700906 return -1;
907
908 ir_supported = 1;
909 }
910 }
911
912 if (ir_supported && ir_ioapic_num != nr_ioapics) {
913 printk(KERN_WARNING
914 "Not all IO-APIC's listed under remapping hardware\n");
915 return -1;
916 }
917
918 return ir_supported;
919}
Fenghua Yub24696b2009-03-27 14:22:44 -0700920
921void disable_intr_remapping(void)
922{
923 struct dmar_drhd_unit *drhd;
924 struct intel_iommu *iommu = NULL;
925
926 /*
927 * Disable Interrupt-remapping for all the DRHD's now.
928 */
929 for_each_iommu(iommu, drhd) {
930 if (!ecap_ir_support(iommu->ecap))
931 continue;
932
933 iommu_disable_intr_remapping(iommu);
934 }
935}
936
937int reenable_intr_remapping(int eim)
938{
939 struct dmar_drhd_unit *drhd;
940 int setup = 0;
941 struct intel_iommu *iommu = NULL;
942
943 for_each_iommu(iommu, drhd)
944 if (iommu->qi)
945 dmar_reenable_qi(iommu);
946
947 /*
948 * Setup Interrupt-remapping for all the DRHD's now.
949 */
950 for_each_iommu(iommu, drhd) {
951 if (!ecap_ir_support(iommu->ecap))
952 continue;
953
954 /* Set up interrupt remapping for iommu.*/
955 iommu_set_intr_remapping(iommu, eim);
956 setup = 1;
957 }
958
959 if (!setup)
960 goto error;
961
962 return 0;
963
964error:
965 /*
966 * handle error condition gracefully here!
967 */
968 return -1;
969}
970