/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 */

#include <linux/kvm_host.h>

#include <asm/msidef.h>

#include "irq.h"

#include "ioapic.h"

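/*
 * ->set callbacks for irqchip routing entries: deliver a routed GSI to
 * the in-kernel PIC or IOAPIC model.  They are installed by
 * setup_routing_entry() below.
 */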
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int level)
{
#ifdef CONFIG_X86
	return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level);
#else
	return -1;
#endif
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int level)
{
	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
}

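/*
 * Compute the set of vcpus (a bitmask indexed by vcpu_id) that the
 * interrupt described by a redirection entry should be delivered to.
 * Lowest-priority delivery narrows the mask to the single vcpu chosen by
 * kvm_get_lowest_prio_vcpu(); an unknown delivery mode yields an empty mask.
 */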
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask)
{
	struct kvm_vcpu *vcpu;

	kvm_ioapic_get_delivery_bitmask(ioapic, entry->fields.dest_id,
					entry->fields.dest_mode,
					deliver_bitmask);
	switch (entry->fields.delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
				entry->fields.vector, deliver_bitmask);
		*deliver_bitmask = 1 << vcpu->vcpu_id;
		break;
	case IOAPIC_FIXED:
	case IOAPIC_NMI:
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
				entry->fields.delivery_mode);
		*deliver_bitmask = 0;
	}
}

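/*
 * Deliver an MSI routing entry: decode the destination, vector and the
 * dest/trigger/delivery modes from the MSI address and data, then inject
 * the vector into every local APIC in the delivery bitmask.  Returns -1
 * if nothing could be delivered, otherwise the sum of the per-vcpu
 * kvm_apic_set_irq() results.
 */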
static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		       struct kvm *kvm, int level)
{
	int vcpu_id, r = -1;
	struct kvm_vcpu *vcpu;
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	union kvm_ioapic_redirect_entry entry;
	unsigned long deliver_bitmask;

	BUG_ON(!ioapic);

	entry.bits = 0;
	entry.fields.dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	entry.fields.vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
			(unsigned long *)&e->msi.address_lo);
	entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
			(unsigned long *)&e->msi.data);
	entry.fields.delivery_mode = test_bit(
			MSI_DATA_DELIVERY_MODE_SHIFT,
			(unsigned long *)&e->msi.data);

	/* TODO Deal with RH bit of MSI message address */

	kvm_get_intr_delivery_bitmask(ioapic, &entry, &deliver_bitmask);

	if (!deliver_bitmask) {
		printk(KERN_WARNING "kvm: no destination for MSI delivery!\n");
		return -1;
	}
	for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
		if (!(deliver_bitmask & (1 << vcpu_id)))
			continue;
		deliver_bitmask &= ~(1 << vcpu_id);
		vcpu = ioapic->kvm->vcpus[vcpu_id];
		if (vcpu) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, entry.fields.vector,
					      entry.fields.trig_mode);
		}
	}
	return r;
}

/* This should be called with the kvm->lock mutex held
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e;
	unsigned long *irq_state, sig_level;
	int ret = -1;

	if (irq < KVM_IOAPIC_NUM_PINS) {
		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];

		/* Logical OR for level-triggered interrupts */
		if (level)
			set_bit(irq_source_id, irq_state);
		else
			clear_bit(irq_source_id, irq_state);
		sig_level = !!(*irq_state);
	} else /* Deal with MSI/MSI-X */
		sig_level = 1;

	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->gsi == irq) {
			int r = e->set(e, kvm, sig_level);
			if (r < 0)
				continue;

			ret = r + ((ret < 0) ? 0 : ret);
		}
	return ret;
}

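/*
 * Called when the guest acks an irqchip pin: translate (irqchip, pin)
 * back to a GSI via the routing table and run every ack notifier
 * registered for that GSI.
 */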
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_ack_notifier *kian;
	struct hlist_node *n;
	unsigned gsi = pin;

	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->irqchip.irqchip == irqchip &&
		    e->irqchip.pin == pin) {
			gsi = e->gsi;
			break;
		}

	hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

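/* Register/unregister ack notifiers on kvm->arch.irq_ack_notifier_list. */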
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
}

void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
{
	hlist_del_init(&kian->link);
}

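/*
 * IRQ source IDs distinguish independent originators of the same GSI
 * (userspace injects with KVM_USERSPACE_IRQ_SOURCE_ID; in-kernel users
 * allocate their own), so a shared level-triggered line is only dropped
 * once every source has cleared its bit in kvm->arch.irq_states[].
 */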
/* The caller must hold kvm->lock mutex */
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id = find_first_zero_bit(bitmap,
		sizeof(kvm->arch.irq_sources_bitmap));

	if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_WARNING "kvm: exhausted allocatable IRQ sources!\n");
		return -EFAULT;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);

	return irq_source_id;
}

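/* Release a source ID and clear any level state it still asserts on each pin. */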
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	int i;

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);

	if (irq_source_id < 0 ||
	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		return;
	}
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
}

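/*
 * Mask notifiers: registered callbacks are invoked with the new mask
 * state of the given irq line via kvm_fire_mask_notifiers().
 */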
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	kimn->irq = irq;
	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	hlist_del(&kimn->link);
}

void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	struct hlist_node *n;

	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
		if (kimn->irq == irq)
			kimn->func(kimn, mask);
}

static void __kvm_free_irq_routing(struct list_head *irq_routing)
{
	struct kvm_kernel_irq_routing_entry *e, *n;

	list_for_each_entry_safe(e, n, irq_routing, link)
		kfree(e);
}

void kvm_free_irq_routing(struct kvm *kvm)
{
	__kvm_free_irq_routing(&kvm->irq_routing);
}

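/*
 * Translate one userspace kvm_irq_routing_entry into its in-kernel
 * counterpart.  PIC slave pins are biased by 8 so both PICs share a
 * single 0-15 pin numbering, and MSI entries simply record the message
 * to be replayed by kvm_set_msi().
 */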
static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;
	int delta;

	e->gsi = ue->gsi;
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
			e->set = kvm_set_pic_irq;
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}

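/*
 * Replace the whole irq routing table.  New entries are built on a local
 * list first and swapped in under kvm->lock; on failure the old table is
 * left untouched, and whichever list is no longer needed (the partial new
 * one, or the old table after a successful swap) is freed at "out".
 */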
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct kvm_kernel_irq_routing_entry *e = NULL;
	unsigned i;
	int r;

	for (i = 0; i < nr; ++i) {
		r = -EINVAL;
		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (ue->flags)
			goto out;
		r = -ENOMEM;
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out;
		r = setup_routing_entry(e, ue);
		if (r)
			goto out;
		++ue;
		list_add(&e->link, &irq_list);
		e = NULL;
	}

	mutex_lock(&kvm->lock);
	list_splice(&kvm->irq_routing, &tmp);
	INIT_LIST_HEAD(&kvm->irq_routing);
	list_splice(&irq_list, &kvm->irq_routing);
	INIT_LIST_HEAD(&irq_list);
	list_splice(&tmp, &irq_list);
	mutex_unlock(&kvm->lock);

	r = 0;

out:
	kfree(e);
	__kvm_free_irq_routing(&irq_list);
	return r;
}

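/*
 * Default GSI routing: pins 0-15 go to both the PIC (x86 only) and the
 * IOAPIC, pins 16-23 to the IOAPIC alone, and ia64 extends the IOAPIC
 * range up to pin 47.
 */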
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
# define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
# define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
# define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}