/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

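/*
 * Invoked on KVM_PV_REASON_PAGE_NOT_PRESENT: sleep until
 * kvm_async_pf_task_wake() is called with the same token.  If a dummy
 * entry for the token is already hashed, the wakeup arrived first and
 * we return immediately.  In contexts that must not reschedule (the
 * idle task, or preempt_count() > 1) we spin in halt instead of
 * sleeping.
 */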
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);
	int cpu, idle;

	cpu = get_cpu();
	idle = idle_cpu(cpu);
	put_cpu();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = idle || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

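/*
 * Wake one waiter: halted waiters spin in native_safe_halt() and need
 * a reschedule IPI to break out of it; sleeping tasks sit on the
 * node's wait queue.
 */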
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

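/*
 * Invoked on KVM_PV_REASON_PAGE_READY: wake the task waiting on
 * @token, or hash a dummy entry if the wakeup arrived before the
 * corresponding fault.  A token of ~0 wakes every waiter on this cpu.
 */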
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

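/*
 * Page fault entry point while async PF is enabled.  A zero reason in
 * the shared apf_reason slot means an ordinary fault; anything else is
 * a notification from the host.
 */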
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		kvm_async_pf_task_wait((u32)read_cr2());
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
	       cpu, __pa(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic->write(APIC_EOI, APIC_EOI_ACK);
}

void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = __pa(&__get_cpu_var(apf_reason));

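		/*
		 * A preemptible kernel can reschedule even from kernel
		 * mode, so ask the host to send async PF notifications
		 * regardless of the CPL we fault at.
		 */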
#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec, since
	 * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
	 * The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

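/*
 * Lock-free, seqcount-style read of the steal time record: the host
 * bumps src->version before and after an update, so retry while the
 * version is odd or changed under us.
 */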
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
#ifdef CONFIG_KVM_CLOCK
	WARN_ON(kvm_register_clock("primary cpu clock"));
#endif
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

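/*
 * CPU hotplug: re-arm the PV features when a cpu comes (back) online
 * and tear them down before it goes away, so that the host stops
 * writing into the outgoing cpu's shared memory.
 */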
static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

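/*
 * Route vector 14 (#PF) to the async page fault entry point; ordinary
 * faults still reach do_page_fault() through it.
 */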
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		struct apic **drv;

		for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
			/* Should happen once for each apic */
			WARN_ON((*drv)->eoi_write == kvm_guest_apic_eoi_write);
			(*drv)->eoi_write = kvm_guest_apic_eoi_write;
		}
	}

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

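/*
 * has_steal_clock is settled in kvm_guest_init(); the static keys are
 * flipped later from an initcall, once jump label patching is fully
 * set up.
 */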
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);