Carsten Otte043405e2007-10-10 17:16:19 +02001/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * derived from drivers/kvm/kvm_main.c
5 *
6 * Copyright (C) 2006 Qumranet, Inc.
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +03007 * Copyright (C) 2008 Qumranet, Inc.
8 * Copyright IBM Corporation, 2008
Carsten Otte043405e2007-10-10 17:16:19 +02009 *
10 * Authors:
11 * Avi Kivity <avi@qumranet.com>
12 * Yaniv Kamay <yaniv@qumranet.com>
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +030013 * Amit Shah <amit.shah@qumranet.com>
14 * Ben-Ami Yassour <benami@il.ibm.com>
Carsten Otte043405e2007-10-10 17:16:19 +020015 *
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
18 *
19 */
20
Avi Kivityedf88412007-12-16 11:02:48 +020021#include <linux/kvm_host.h>
Carsten Otte313a3dc2007-10-11 19:16:52 +020022#include "irq.h"
Zhang Xiantao1d737c82007-12-14 09:35:10 +080023#include "mmu.h"
Sheng Yang78376992008-01-28 05:10:22 +080024#include "i8254.h"
Izik Eidus37817f22008-03-24 23:14:53 +020025#include "tss.h"
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -030026#include "kvm_cache_regs.h"
Avi Kivity26eef702008-07-03 14:59:22 +030027#include "x86.h"
Carsten Otte313a3dc2007-10-11 19:16:52 +020028
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -020029#include <linux/clocksource.h>
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +030030#include <linux/interrupt.h>
Carsten Otte313a3dc2007-10-11 19:16:52 +020031#include <linux/kvm.h>
32#include <linux/fs.h>
33#include <linux/vmalloc.h>
Carsten Otte5fb76f92007-10-29 16:08:51 +010034#include <linux/module.h>
Zhang Xiantao0de10342007-11-20 16:25:04 +080035#include <linux/mman.h>
Marcelo Tosatti2bacc552007-12-12 10:46:12 -050036#include <linux/highmem.h>
Joerg Roedel19de40a2008-12-03 14:43:34 +010037#include <linux/iommu.h>
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +030038#include <linux/intel-iommu.h>
Gerd Hoffmannc8076602009-02-04 17:52:04 +010039#include <linux/cpufreq.h>
Avi Kivity18863bd2009-09-07 11:12:18 +030040#include <linux/user-return-notifier.h>
Marcelo Tosattia983fb22009-12-23 14:35:23 -020041#include <linux/srcu.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090042#include <linux/slab.h>
Zhang, Yanminff9d07a2010-04-19 13:32:45 +080043#include <linux/perf_event.h>
Avi Kivityaec51dc2009-07-01 16:01:02 +030044#include <trace/events/kvm.h>
45#undef TRACE_INCLUDE_FILE
Marcelo Tosatti229456f2009-06-17 09:22:14 -030046#define CREATE_TRACE_POINTS
47#include "trace.h"
Carsten Otte043405e2007-10-10 17:16:19 +020048
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +020049#include <asm/debugreg.h>
Carsten Otte043405e2007-10-10 17:16:19 +020050#include <asm/uaccess.h>
Zhang Xiantaod825ed02007-11-14 20:08:51 +080051#include <asm/msr.h>
Avi Kivitya5f61302008-02-20 17:57:21 +020052#include <asm/desc.h>
Sheng Yang0bed3b52008-10-09 16:01:54 +080053#include <asm/mtrr.h>
Huang Ying890ca9a2009-05-11 16:48:15 +080054#include <asm/mce.h>
Carsten Otte043405e2007-10-10 17:16:19 +020055
Carsten Otte313a3dc2007-10-11 19:16:52 +020056#define MAX_IO_MSRS 256
Carsten Ottea03490e2007-10-29 16:09:35 +010057#define CR0_RESERVED_BITS \
58 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
59 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
60 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
61#define CR4_RESERVED_BITS \
62 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
63 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
64 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
65 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
66
67#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
Huang Ying890ca9a2009-05-11 16:48:15 +080068
69#define KVM_MAX_MCE_BANKS 32
70#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
71
Joerg Roedel50a37eb2008-01-31 14:57:38 +010072/* EFER defaults:
73 * - enable SYSCALL by default because it is emulated by KVM
74 * - enable LME and LMA by default on 64-bit KVM
75 */
76#ifdef CONFIG_X86_64
77static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
78#else
79static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
80#endif
Carsten Otte313a3dc2007-10-11 19:16:52 +020081
Avi Kivityba1389b2007-11-18 16:24:12 +020082#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
83#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
Hollis Blanchard417bc302007-10-31 17:24:23 -050084
Gleb Natapovcb142eb2009-08-09 15:17:40 +030085static void update_cr8_intercept(struct kvm_vcpu *vcpu);
Avi Kivity674eea02008-02-11 18:37:23 +020086static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
87 struct kvm_cpuid_entry2 __user *entries);
88
Zhang Xiantao97896d02007-11-14 20:09:30 +080089struct kvm_x86_ops *kvm_x86_ops;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -030090EXPORT_SYMBOL_GPL(kvm_x86_ops);
Zhang Xiantao97896d02007-11-14 20:09:30 +080091
Andre Przywaraed85c062009-06-25 12:36:49 +020092int ignore_msrs = 0;
93module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
94
Avi Kivity18863bd2009-09-07 11:12:18 +030095#define KVM_NR_SHARED_MSRS 16
96
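/*
 * "Shared MSRs" are MSRs that must hold guest values while the guest runs,
 * but whose host values only need to be restored before the CPU returns to
 * host userspace.  Instead of rewriting them on every VM exit, the restore
 * is deferred to a user-return notifier (kvm_on_user_return() below).
 */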
97struct kvm_shared_msrs_global {
98 int nr;
Sheng Yang2bf78fa2009-12-18 16:48:44 +080099 u32 msrs[KVM_NR_SHARED_MSRS];
Avi Kivity18863bd2009-09-07 11:12:18 +0300100};
101
102struct kvm_shared_msrs {
103 struct user_return_notifier urn;
104 bool registered;
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800105 struct kvm_shared_msr_values {
106 u64 host;
107 u64 curr;
108 } values[KVM_NR_SHARED_MSRS];
Avi Kivity18863bd2009-09-07 11:12:18 +0300109};
110
111static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
112static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
113
Hollis Blanchard417bc302007-10-31 17:24:23 -0500114struct kvm_stats_debugfs_item debugfs_entries[] = {
Avi Kivityba1389b2007-11-18 16:24:12 +0200115 { "pf_fixed", VCPU_STAT(pf_fixed) },
116 { "pf_guest", VCPU_STAT(pf_guest) },
117 { "tlb_flush", VCPU_STAT(tlb_flush) },
118 { "invlpg", VCPU_STAT(invlpg) },
119 { "exits", VCPU_STAT(exits) },
120 { "io_exits", VCPU_STAT(io_exits) },
121 { "mmio_exits", VCPU_STAT(mmio_exits) },
122 { "signal_exits", VCPU_STAT(signal_exits) },
123 { "irq_window", VCPU_STAT(irq_window_exits) },
Sheng Yangf08864b2008-05-15 18:23:25 +0800124 { "nmi_window", VCPU_STAT(nmi_window_exits) },
Avi Kivityba1389b2007-11-18 16:24:12 +0200125 { "halt_exits", VCPU_STAT(halt_exits) },
126 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Amit Shahf11c3a82008-02-21 01:00:30 +0530127 { "hypercalls", VCPU_STAT(hypercalls) },
Avi Kivityba1389b2007-11-18 16:24:12 +0200128 { "request_irq", VCPU_STAT(request_irq_exits) },
129 { "irq_exits", VCPU_STAT(irq_exits) },
130 { "host_state_reload", VCPU_STAT(host_state_reload) },
131 { "efer_reload", VCPU_STAT(efer_reload) },
132 { "fpu_reload", VCPU_STAT(fpu_reload) },
133 { "insn_emulation", VCPU_STAT(insn_emulation) },
134 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
Avi Kivityfa89a812008-09-01 15:57:51 +0300135 { "irq_injections", VCPU_STAT(irq_injections) },
Jan Kiszkac4abb7c2008-09-26 09:30:55 +0200136 { "nmi_injections", VCPU_STAT(nmi_injections) },
Avi Kivity4cee5762007-11-18 16:37:07 +0200137 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
138 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
139 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
140 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
141 { "mmu_flooded", VM_STAT(mmu_flooded) },
142 { "mmu_recycled", VM_STAT(mmu_recycled) },
Avi Kivitydfc5aa02007-12-18 19:47:18 +0200143 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
Marcelo Tosatti4731d4c2008-09-23 13:18:39 -0300144 { "mmu_unsync", VM_STAT(mmu_unsync) },
Avi Kivity0f74a242007-11-20 23:01:14 +0200145 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
Marcelo Tosatti05da4552008-02-23 11:44:30 -0300146 { "largepages", VM_STAT(lpages) },
Hollis Blanchard417bc302007-10-31 17:24:23 -0500147 { NULL }
148};
149
Avi Kivity18863bd2009-09-07 11:12:18 +0300150static void kvm_on_user_return(struct user_return_notifier *urn)
151{
152 unsigned slot;
Avi Kivity18863bd2009-09-07 11:12:18 +0300153 struct kvm_shared_msrs *locals
154 = container_of(urn, struct kvm_shared_msrs, urn);
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800155 struct kvm_shared_msr_values *values;
Avi Kivity18863bd2009-09-07 11:12:18 +0300156
157 for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800158 values = &locals->values[slot];
159 if (values->host != values->curr) {
160 wrmsrl(shared_msrs_global.msrs[slot], values->host);
161 values->curr = values->host;
Avi Kivity18863bd2009-09-07 11:12:18 +0300162 }
163 }
164 locals->registered = false;
165 user_return_notifier_unregister(urn);
166}
167
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800168static void shared_msr_update(unsigned slot, u32 msr)
Avi Kivity18863bd2009-09-07 11:12:18 +0300169{
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800170 struct kvm_shared_msrs *smsr;
Avi Kivity18863bd2009-09-07 11:12:18 +0300171 u64 value;
172
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800173 smsr = &__get_cpu_var(shared_msrs);
174 /* only reads here, and nobody should be modifying this at the
175 * moment, so no locking is needed */
176 if (slot >= shared_msrs_global.nr) {
177 printk(KERN_ERR "kvm: invalid MSR slot!");
178 return;
179 }
180 rdmsrl_safe(msr, &value);
181 smsr->values[slot].host = value;
182 smsr->values[slot].curr = value;
183}
184
185void kvm_define_shared_msr(unsigned slot, u32 msr)
186{
Avi Kivity18863bd2009-09-07 11:12:18 +0300187 if (slot >= shared_msrs_global.nr)
188 shared_msrs_global.nr = slot + 1;
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800189 shared_msrs_global.msrs[slot] = msr;
190 /* make sure shared_msrs_global has been updated before it is read */
191 smp_wmb();
Avi Kivity18863bd2009-09-07 11:12:18 +0300192}
193EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
194
195static void kvm_shared_msr_cpu_online(void)
196{
197 unsigned i;
Avi Kivity18863bd2009-09-07 11:12:18 +0300198
199 for (i = 0; i < shared_msrs_global.nr; ++i)
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800200 shared_msr_update(i, shared_msrs_global.msrs[i]);
Avi Kivity18863bd2009-09-07 11:12:18 +0300201}
202
Avi Kivityd5696722009-12-02 12:28:47 +0200203void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
Avi Kivity18863bd2009-09-07 11:12:18 +0300204{
205 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
206
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800207 if (((value ^ smsr->values[slot].curr) & mask) == 0)
Avi Kivity18863bd2009-09-07 11:12:18 +0300208 return;
Sheng Yang2bf78fa2009-12-18 16:48:44 +0800209 smsr->values[slot].curr = value;
210 wrmsrl(shared_msrs_global.msrs[slot], value);
Avi Kivity18863bd2009-09-07 11:12:18 +0300211 if (!smsr->registered) {
212 smsr->urn.on_user_return = kvm_on_user_return;
213 user_return_notifier_register(&smsr->urn);
214 smsr->registered = true;
215 }
216}
217EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
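/*
 * Illustrative sketch of how the shared-MSR helpers above are meant to be
 * used (not code from this file; the slot number and guest value are made
 * up for the example):
 *
 *	kvm_define_shared_msr(0, MSR_K6_STAR);	   // once, at hardware setup
 *	...
 *	kvm_set_shared_msr(0, guest_star, -1ull);  // before entering the guest
 *
 * The saved host value is then restored lazily by kvm_on_user_return()
 * the next time the CPU returns to userspace.
 */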
218
Avi Kivity3548bab2009-11-28 14:18:47 +0200219static void drop_user_return_notifiers(void *ignore)
220{
221 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
222
223 if (smsr->registered)
224 kvm_on_user_return(&smsr->urn);
225}
226
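/*
 * Look up @selector in the current GDT (or LDT when the TI bit is set) and
 * return the descriptor's base address, including the upper 32 bits of
 * 64-bit system descriptors.
 */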
Carsten Otte5fb76f92007-10-29 16:08:51 +0100227unsigned long segment_base(u16 selector)
228{
229 struct descriptor_table gdt;
Avi Kivitya5f61302008-02-20 17:57:21 +0200230 struct desc_struct *d;
Carsten Otte5fb76f92007-10-29 16:08:51 +0100231 unsigned long table_base;
232 unsigned long v;
233
234 if (selector == 0)
235 return 0;
236
Akinobu Mitab792c342009-07-19 00:00:01 +0900237 kvm_get_gdt(&gdt);
Carsten Otte5fb76f92007-10-29 16:08:51 +0100238 table_base = gdt.base;
239
240 if (selector & 4) { /* from ldt */
Akinobu Mitab792c342009-07-19 00:00:01 +0900241 u16 ldt_selector = kvm_read_ldt();
Carsten Otte5fb76f92007-10-29 16:08:51 +0100242
Carsten Otte5fb76f92007-10-29 16:08:51 +0100243 table_base = segment_base(ldt_selector);
244 }
Avi Kivitya5f61302008-02-20 17:57:21 +0200245 d = (struct desc_struct *)(table_base + (selector & ~7));
Akinobu Mita46a359e2009-07-18 23:58:32 +0900246 v = get_desc_base(d);
Carsten Otte5fb76f92007-10-29 16:08:51 +0100247#ifdef CONFIG_X86_64
Avi Kivitya5f61302008-02-20 17:57:21 +0200248 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
249 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
Carsten Otte5fb76f92007-10-29 16:08:51 +0100250#endif
251 return v;
252}
253EXPORT_SYMBOL_GPL(segment_base);
254
Carsten Otte6866b832007-10-29 16:09:10 +0100255u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
256{
257 if (irqchip_in_kernel(vcpu->kvm))
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800258 return vcpu->arch.apic_base;
Carsten Otte6866b832007-10-29 16:09:10 +0100259 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800260 return vcpu->arch.apic_base;
Carsten Otte6866b832007-10-29 16:09:10 +0100261}
262EXPORT_SYMBOL_GPL(kvm_get_apic_base);
263
264void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
265{
266 /* TODO: reserve bits check */
267 if (irqchip_in_kernel(vcpu->kvm))
268 kvm_lapic_set_base(vcpu, data);
269 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800270 vcpu->arch.apic_base = data;
Carsten Otte6866b832007-10-29 16:09:10 +0100271}
272EXPORT_SYMBOL_GPL(kvm_set_apic_base);
273
Eddie Dong3fd28fc2009-11-19 17:54:07 +0200274#define EXCPT_BENIGN 0
275#define EXCPT_CONTRIBUTORY 1
276#define EXCPT_PF 2
277
278static int exception_class(int vector)
279{
280 switch (vector) {
281 case PF_VECTOR:
282 return EXCPT_PF;
283 case DE_VECTOR:
284 case TS_VECTOR:
285 case NP_VECTOR:
286 case SS_VECTOR:
287 case GP_VECTOR:
288 return EXCPT_CONTRIBUTORY;
289 default:
290 break;
291 }
292 return EXCPT_BENIGN;
293}
294
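/*
 * Queue an exception, merging it with any exception already pending:
 * two contributory faults (or a fault raised while delivering #PF)
 * escalate to #DF, and any exception raised while #DF is pending requests
 * a triple fault, i.e. guest shutdown.
 */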
295static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
296 unsigned nr, bool has_error, u32 error_code)
297{
298 u32 prev_nr;
299 int class1, class2;
300
301 if (!vcpu->arch.exception.pending) {
302 queue:
303 vcpu->arch.exception.pending = true;
304 vcpu->arch.exception.has_error_code = has_error;
305 vcpu->arch.exception.nr = nr;
306 vcpu->arch.exception.error_code = error_code;
307 return;
308 }
309
310 /* an exception is already pending; work out how to merge the two */
311 prev_nr = vcpu->arch.exception.nr;
312 if (prev_nr == DF_VECTOR) {
313 /* triple fault -> shutdown */
314 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
315 return;
316 }
317 class1 = exception_class(prev_nr);
318 class2 = exception_class(nr);
319 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
320 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
321 /* generate double fault per SDM Table 5-5 */
322 vcpu->arch.exception.pending = true;
323 vcpu->arch.exception.has_error_code = true;
324 vcpu->arch.exception.nr = DF_VECTOR;
325 vcpu->arch.exception.error_code = 0;
326 } else
327 /* replace the previous exception with the new one in the hope
328 that re-executing the instruction will regenerate the lost
329 exception */
330 goto queue;
331}
332
Avi Kivity298101d2007-11-25 13:41:11 +0200333void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
334{
Eddie Dong3fd28fc2009-11-19 17:54:07 +0200335 kvm_multiple_exception(vcpu, nr, false, 0);
Avi Kivity298101d2007-11-25 13:41:11 +0200336}
337EXPORT_SYMBOL_GPL(kvm_queue_exception);
338
Avi Kivityc3c91fe2007-11-25 14:04:58 +0200339void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
340 u32 error_code)
341{
342 ++vcpu->stat.pf_guest;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800343 vcpu->arch.cr2 = addr;
Avi Kivityc3c91fe2007-11-25 14:04:58 +0200344 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
345}
346
Sheng Yang3419ffc2008-05-15 09:52:48 +0800347void kvm_inject_nmi(struct kvm_vcpu *vcpu)
348{
349 vcpu->arch.nmi_pending = 1;
350}
351EXPORT_SYMBOL_GPL(kvm_inject_nmi);
352
Avi Kivity298101d2007-11-25 13:41:11 +0200353void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
354{
Eddie Dong3fd28fc2009-11-19 17:54:07 +0200355 kvm_multiple_exception(vcpu, nr, true, error_code);
Avi Kivity298101d2007-11-25 13:41:11 +0200356}
357EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
358
Carsten Ottea03490e2007-10-29 16:09:35 +0100359/*
Avi Kivity0a79b002009-09-01 12:03:25 +0300360 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
361 * a #GP and return false.
362 */
363bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
Carsten Otte043405e2007-10-10 17:16:19 +0200364{
Avi Kivity0a79b002009-09-01 12:03:25 +0300365 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
366 return true;
367 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
368 return false;
Carsten Ottea03490e2007-10-29 16:09:35 +0100369}
Avi Kivity0a79b002009-09-01 12:03:25 +0300370EXPORT_SYMBOL_GPL(kvm_require_cpl);
Carsten Ottea03490e2007-10-29 16:09:35 +0100371
372/*
373 * Load the PAE pdptrs. Return true if they are all valid.
374 */
375int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
376{
377 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
378 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
379 int i;
380 int ret;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800381 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
Carsten Ottea03490e2007-10-29 16:09:35 +0100382
Carsten Ottea03490e2007-10-29 16:09:35 +0100383 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
384 offset * sizeof(u64), sizeof(pdpte));
385 if (ret < 0) {
386 ret = 0;
387 goto out;
388 }
389 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
Avi Kivity43a37952009-06-10 14:12:05 +0300390 if (is_present_gpte(pdpte[i]) &&
Dong, Eddie20c466b2009-03-31 23:03:45 +0800391 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
Carsten Ottea03490e2007-10-29 16:09:35 +0100392 ret = 0;
393 goto out;
394 }
395 }
396 ret = 1;
397
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800398 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
Avi Kivity6de4f3a2009-05-31 22:58:47 +0300399 __set_bit(VCPU_EXREG_PDPTR,
400 (unsigned long *)&vcpu->arch.regs_avail);
401 __set_bit(VCPU_EXREG_PDPTR,
402 (unsigned long *)&vcpu->arch.regs_dirty);
Carsten Ottea03490e2007-10-29 16:09:35 +0100403out:
Carsten Ottea03490e2007-10-29 16:09:35 +0100404
405 return ret;
406}
Joerg Roedelcc4b6872008-02-07 13:47:43 +0100407EXPORT_SYMBOL_GPL(load_pdptrs);
Carsten Ottea03490e2007-10-29 16:09:35 +0100408
Avi Kivityd835dfe2007-11-21 02:57:59 +0200409static bool pdptrs_changed(struct kvm_vcpu *vcpu)
410{
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800411 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
Avi Kivityd835dfe2007-11-21 02:57:59 +0200412 bool changed = true;
413 int r;
414
415 if (is_long_mode(vcpu) || !is_pae(vcpu))
416 return false;
417
Avi Kivity6de4f3a2009-05-31 22:58:47 +0300418 if (!test_bit(VCPU_EXREG_PDPTR,
419 (unsigned long *)&vcpu->arch.regs_avail))
420 return true;
421
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800422 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
Avi Kivityd835dfe2007-11-21 02:57:59 +0200423 if (r < 0)
424 goto out;
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800425 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
Avi Kivityd835dfe2007-11-21 02:57:59 +0200426out:
Avi Kivityd835dfe2007-11-21 02:57:59 +0200427
428 return changed;
429}
430
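/*
 * Validate a guest write to CR0 and inject #GP on illegal combinations:
 * upper 32 bits set (64-bit hosts), NW without CD, PG without PE, or an
 * invalid long-mode/PAE activation.  Reserved low bits are silently
 * cleared before the value is handed to vendor code and the MMU is reset.
 */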
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200431void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
Carsten Ottea03490e2007-10-29 16:09:35 +0100432{
Avi Kivityf9a48e62010-01-06 19:10:22 +0200433 cr0 |= X86_CR0_ET;
434
Gleb Natapovab344822010-01-21 15:28:46 +0200435#ifdef CONFIG_X86_64
436 if (cr0 & 0xffffffff00000000UL) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200437 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100438 return;
439 }
Gleb Natapovab344822010-01-21 15:28:46 +0200440#endif
441
442 cr0 &= ~CR0_RESERVED_BITS;
Carsten Ottea03490e2007-10-29 16:09:35 +0100443
444 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200445 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100446 return;
447 }
448
449 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200450 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100451 return;
452 }
453
454 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
455#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +0200456 if ((vcpu->arch.efer & EFER_LME)) {
Carsten Ottea03490e2007-10-29 16:09:35 +0100457 int cs_db, cs_l;
458
459 if (!is_pae(vcpu)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200460 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100461 return;
462 }
463 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
464 if (cs_l) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200465 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100466 return;
467
468 }
469 } else
470#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800471 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200472 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100473 return;
474 }
475
476 }
477
478 kvm_x86_ops->set_cr0(vcpu, cr0);
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800479 vcpu->arch.cr0 = cr0;
Carsten Ottea03490e2007-10-29 16:09:35 +0100480
Carsten Ottea03490e2007-10-29 16:09:35 +0100481 kvm_mmu_reset_context(vcpu);
Carsten Ottea03490e2007-10-29 16:09:35 +0100482 return;
483}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200484EXPORT_SYMBOL_GPL(kvm_set_cr0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100485
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200486void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
Carsten Ottea03490e2007-10-29 16:09:35 +0100487{
Avi Kivity4d4ec082009-12-29 18:07:30 +0200488 kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
Carsten Ottea03490e2007-10-29 16:09:35 +0100489}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200490EXPORT_SYMBOL_GPL(kvm_lmsw);
Carsten Ottea03490e2007-10-29 16:09:35 +0100491
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200492void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Carsten Ottea03490e2007-10-29 16:09:35 +0100493{
Avi Kivityfc78f512009-12-07 12:16:48 +0200494 unsigned long old_cr4 = kvm_read_cr4(vcpu);
Avi Kivitya2edf572009-05-24 22:19:00 +0300495 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
496
Carsten Ottea03490e2007-10-29 16:09:35 +0100497 if (cr4 & CR4_RESERVED_BITS) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200498 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100499 return;
500 }
501
502 if (is_long_mode(vcpu)) {
503 if (!(cr4 & X86_CR4_PAE)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200504 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100505 return;
506 }
Avi Kivitya2edf572009-05-24 22:19:00 +0300507 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
508 && ((cr4 ^ old_cr4) & pdptr_bits)
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800509 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200510 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100511 return;
512 }
513
514 if (cr4 & X86_CR4_VMXE) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200515 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100516 return;
517 }
518 kvm_x86_ops->set_cr4(vcpu, cr4);
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800519 vcpu->arch.cr4 = cr4;
Avi Kivity5a41acc2009-01-11 17:19:35 +0200520 vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
Carsten Ottea03490e2007-10-29 16:09:35 +0100521 kvm_mmu_reset_context(vcpu);
Carsten Ottea03490e2007-10-29 16:09:35 +0100522}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200523EXPORT_SYMBOL_GPL(kvm_set_cr4);
Carsten Ottea03490e2007-10-29 16:09:35 +0100524
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200525void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
Carsten Ottea03490e2007-10-29 16:09:35 +0100526{
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800527 if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
Marcelo Tosatti0ba73cd2008-09-23 13:18:34 -0300528 kvm_mmu_sync_roots(vcpu);
Avi Kivityd835dfe2007-11-21 02:57:59 +0200529 kvm_mmu_flush_tlb(vcpu);
530 return;
531 }
532
Carsten Ottea03490e2007-10-29 16:09:35 +0100533 if (is_long_mode(vcpu)) {
534 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200535 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100536 return;
537 }
538 } else {
539 if (is_pae(vcpu)) {
540 if (cr3 & CR3_PAE_RESERVED_BITS) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200541 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100542 return;
543 }
544 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200545 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100546 return;
547 }
548 }
549 /*
550 * We don't check reserved bits in non-PAE mode, because
551 * real hardware doesn't enforce them there, and VMware depends on this.
552 */
553 }
554
Carsten Ottea03490e2007-10-29 16:09:35 +0100555 /*
556 * Does the new cr3 value map to physical memory? (Note, we
557 * catch an invalid cr3 even in real-mode, because it would
558 * cause trouble later on when we turn on paging anyway.)
559 *
560 * A real CPU would silently accept an invalid cr3 and would
561 * attempt to use it - with largely undefined (and often hard
562 * to debug) behavior on the guest side.
563 */
564 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200565 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100566 else {
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800567 vcpu->arch.cr3 = cr3;
568 vcpu->arch.mmu.new_cr3(vcpu);
Carsten Ottea03490e2007-10-29 16:09:35 +0100569 }
Carsten Ottea03490e2007-10-29 16:09:35 +0100570}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200571EXPORT_SYMBOL_GPL(kvm_set_cr3);
Carsten Ottea03490e2007-10-29 16:09:35 +0100572
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200573void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
Carsten Ottea03490e2007-10-29 16:09:35 +0100574{
575 if (cr8 & CR8_RESERVED_BITS) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200576 kvm_inject_gp(vcpu, 0);
Carsten Ottea03490e2007-10-29 16:09:35 +0100577 return;
578 }
579 if (irqchip_in_kernel(vcpu->kvm))
580 kvm_lapic_set_tpr(vcpu, cr8);
581 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800582 vcpu->arch.cr8 = cr8;
Carsten Ottea03490e2007-10-29 16:09:35 +0100583}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200584EXPORT_SYMBOL_GPL(kvm_set_cr8);
Carsten Ottea03490e2007-10-29 16:09:35 +0100585
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200586unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
Carsten Ottea03490e2007-10-29 16:09:35 +0100587{
588 if (irqchip_in_kernel(vcpu->kvm))
589 return kvm_lapic_get_cr8(vcpu);
590 else
Zhang Xiantaoad312c72007-12-13 23:50:52 +0800591 return vcpu->arch.cr8;
Carsten Ottea03490e2007-10-29 16:09:35 +0100592}
Avi Kivity2d3ad1f2008-02-24 11:20:43 +0200593EXPORT_SYMBOL_GPL(kvm_get_cr8);
Carsten Ottea03490e2007-10-29 16:09:35 +0100594
Alexander Grafd8017472008-11-25 20:17:11 +0100595static inline u32 bit(int bitno)
596{
597 return 1 << (bitno & 31);
598}
599
Carsten Otte043405e2007-10-10 17:16:19 +0200600/*
601 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
602 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
603 *
604 * This list is modified at module load time to reflect the
Glauber Costae3267cb2009-10-06 13:24:50 -0400605 * capabilities of the host CPU. This capability test skips MSRs that are
 606 * kvm-specific; those are placed at the beginning of the list.
Carsten Otte043405e2007-10-10 17:16:19 +0200607 */
Glauber Costae3267cb2009-10-06 13:24:50 -0400608
Gleb Natapov10388a02010-01-17 15:51:23 +0200609#define KVM_SAVE_MSRS_BEGIN 5
Carsten Otte043405e2007-10-10 17:16:19 +0200610static u32 msrs_to_save[] = {
Glauber Costae3267cb2009-10-06 13:24:50 -0400611 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
Gleb Natapov55cd8e52010-01-17 15:51:22 +0200612 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
Gleb Natapov10388a02010-01-17 15:51:23 +0200613 HV_X64_MSR_APIC_ASSIST_PAGE,
Carsten Otte043405e2007-10-10 17:16:19 +0200614 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
615 MSR_K6_STAR,
616#ifdef CONFIG_X86_64
617 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
618#endif
Glauber Costae3267cb2009-10-06 13:24:50 -0400619 MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
Carsten Otte043405e2007-10-10 17:16:19 +0200620};
621
622static unsigned num_msrs_to_save;
623
624static u32 emulated_msrs[] = {
625 MSR_IA32_MISC_ENABLE,
626};
627
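/*
 * Validate a guest write to EFER: reserved bits, toggling LME while paging
 * is on, or setting FFXSR/SVME without the matching CPUID feature bit all
 * inject #GP.  LMA is preserved from the current value.
 */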
Carsten Otte15c4a642007-10-30 18:44:17 +0100628static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
629{
Joerg Roedelf2b4b7d2008-01-31 14:57:37 +0100630 if (efer & efer_reserved_bits) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200631 kvm_inject_gp(vcpu, 0);
Carsten Otte15c4a642007-10-30 18:44:17 +0100632 return;
633 }
634
635 if (is_paging(vcpu)
Avi Kivityf6801df2010-01-21 15:31:50 +0200636 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +0200637 kvm_inject_gp(vcpu, 0);
Carsten Otte15c4a642007-10-30 18:44:17 +0100638 return;
639 }
640
Alexander Graf1b2fd702009-02-02 16:23:51 +0100641 if (efer & EFER_FFXSR) {
642 struct kvm_cpuid_entry2 *feat;
643
644 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
645 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
Alexander Graf1b2fd702009-02-02 16:23:51 +0100646 kvm_inject_gp(vcpu, 0);
647 return;
648 }
649 }
650
Alexander Grafd8017472008-11-25 20:17:11 +0100651 if (efer & EFER_SVME) {
652 struct kvm_cpuid_entry2 *feat;
653
654 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
655 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
Alexander Grafd8017472008-11-25 20:17:11 +0100656 kvm_inject_gp(vcpu, 0);
657 return;
658 }
659 }
660
Carsten Otte15c4a642007-10-30 18:44:17 +0100661 kvm_x86_ops->set_efer(vcpu, efer);
662
663 efer &= ~EFER_LMA;
Avi Kivityf6801df2010-01-21 15:31:50 +0200664 efer |= vcpu->arch.efer & EFER_LMA;
Carsten Otte15c4a642007-10-30 18:44:17 +0100665
Avi Kivityf6801df2010-01-21 15:31:50 +0200666 vcpu->arch.efer = efer;
Avi Kivity9645bb562009-03-31 11:31:54 +0300667
668 vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
669 kvm_mmu_reset_context(vcpu);
Carsten Otte15c4a642007-10-30 18:44:17 +0100670}
671
Joerg Roedelf2b4b7d2008-01-31 14:57:37 +0100672void kvm_enable_efer_bits(u64 mask)
673{
674 efer_reserved_bits &= ~mask;
675}
676EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
677
678
Carsten Otte15c4a642007-10-30 18:44:17 +0100679/*
680 * Writes msr value into the appropriate "register".
681 * Returns 0 on success, non-0 otherwise.
682 * Assumes vcpu_load() was already called.
683 */
684int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
685{
686 return kvm_x86_ops->set_msr(vcpu, msr_index, data);
687}
688
Carsten Otte313a3dc2007-10-11 19:16:52 +0200689/*
690 * Adapt set_msr() to msr_io()'s calling convention
691 */
692static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
693{
694 return kvm_set_msr(vcpu, index, *data);
695}
696
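/*
 * Publish the wall-clock time at guest physical address @wall_clock using
 * the pvclock protocol: the version field is bumped before and after the
 * structure is written, so the guest only trusts data read while the
 * version is even and unchanged across the read.
 */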
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200697static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
698{
699 static int version;
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200700 struct pvclock_wall_clock wc;
Jason Wang923de3c2010-01-27 19:13:49 +0800701 struct timespec boot;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200702
703 if (!wall_clock)
704 return;
705
706 version++;
707
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200708 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
709
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200710 /*
711 * The guest calculates current wall clock time by adding
712 * system time (updated by kvm_write_guest_time below) to the
713 * wall clock specified here. Guest system time equals host
714 * system time for us, thus we must fill in the host boot time here.
715 */
Jason Wang923de3c2010-01-27 19:13:49 +0800716 getboottime(&boot);
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200717
718 wc.sec = boot.tv_sec;
719 wc.nsec = boot.tv_nsec;
720 wc.version = version;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200721
722 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
723
724 version++;
725 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200726}
727
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200728static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
729{
730 uint32_t quotient, remainder;
731
732 /* Don't try to replace with do_div(), this one calculates
733 * "(dividend << 32) / divisor" */
734 __asm__ ( "divl %4"
735 : "=a" (quotient), "=d" (remainder)
736 : "0" (0), "1" (dividend), "r" (divisor) );
737 return quotient;
738}
739
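/*
 * Precompute the tsc_shift/tsc_to_system_mul pair the guest uses to turn
 * TSC deltas into nanoseconds: the delta is shifted by tsc_shift and then
 * multiplied by tsc_to_system_mul as a 32.32 fixed-point fraction.
 */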
740static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
741{
742 uint64_t nsecs = 1000000000LL;
743 int32_t shift = 0;
744 uint64_t tps64;
745 uint32_t tps32;
746
747 tps64 = tsc_khz * 1000LL;
748 while (tps64 > nsecs*2) {
749 tps64 >>= 1;
750 shift--;
751 }
752
753 tps32 = (uint32_t)tps64;
754 while (tps32 <= (uint32_t)nsecs) {
755 tps32 <<= 1;
756 shift++;
757 }
758
759 hv_clock->tsc_shift = shift;
760 hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
761
762 pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
Harvey Harrison80a914d2008-10-15 22:01:25 -0700763 __func__, tsc_khz, hv_clock->tsc_shift,
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200764 hv_clock->tsc_to_system_mul);
765}
766
Gerd Hoffmannc8076602009-02-04 17:52:04 +0100767static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
768
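/*
 * Copy the current pvclock_vcpu_time_info (TSC timestamp, system time and
 * the scaling parameters computed above) into the guest page registered
 * via MSR_KVM_SYSTEM_TIME, bumping the version so the guest sees a
 * consistent snapshot.
 */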
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200769static void kvm_write_guest_time(struct kvm_vcpu *v)
770{
771 struct timespec ts;
772 unsigned long flags;
773 struct kvm_vcpu_arch *vcpu = &v->arch;
774 void *shared_kaddr;
Avi Kivity463656c2009-04-12 15:49:07 +0300775 unsigned long this_tsc_khz;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200776
777 if ((!vcpu->time_page))
778 return;
779
Avi Kivity463656c2009-04-12 15:49:07 +0300780 this_tsc_khz = get_cpu_var(cpu_tsc_khz);
781 if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
782 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
783 vcpu->hv_clock_tsc_khz = this_tsc_khz;
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200784 }
Avi Kivity463656c2009-04-12 15:49:07 +0300785 put_cpu_var(cpu_tsc_khz);
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200786
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200787 /* Keep irq disabled to prevent changes to the clock */
788 local_irq_save(flags);
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +0530789 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200790 ktime_get_ts(&ts);
Jason Wang923de3c2010-01-27 19:13:49 +0800791 monotonic_to_bootbased(&ts);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200792 local_irq_restore(flags);
793
794 /* With all the info we got, fill in the values */
795
796 vcpu->hv_clock.system_time = ts.tv_nsec +
Glauber Costaafbcf7a2009-10-16 15:28:36 -0400797 (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
798
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200799 /*
800 * The interface expects us to write an even number signaling that the
801 * update is finished. Since the guest won't see the intermediate
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200802 * state, we just increase by 2 at the end.
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200803 */
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200804 vcpu->hv_clock.version += 2;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200805
806 shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
807
808 memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
Gerd Hoffmann50d0a0f2008-06-03 16:17:31 +0200809 sizeof(vcpu->hv_clock));
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -0200810
811 kunmap_atomic(shared_kaddr, KM_USER0);
812
813 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
814}
815
Gerd Hoffmannc8076602009-02-04 17:52:04 +0100816static int kvm_request_guest_time_update(struct kvm_vcpu *v)
817{
818 struct kvm_vcpu_arch *vcpu = &v->arch;
819
820 if (!vcpu->time_page)
821 return 0;
822 set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
823 return 1;
824}
825
Avi Kivity9ba075a2008-05-26 20:06:35 +0300826static bool msr_mtrr_valid(unsigned msr)
827{
828 switch (msr) {
829 case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
830 case MSR_MTRRfix64K_00000:
831 case MSR_MTRRfix16K_80000:
832 case MSR_MTRRfix16K_A0000:
833 case MSR_MTRRfix4K_C0000:
834 case MSR_MTRRfix4K_C8000:
835 case MSR_MTRRfix4K_D0000:
836 case MSR_MTRRfix4K_D8000:
837 case MSR_MTRRfix4K_E0000:
838 case MSR_MTRRfix4K_E8000:
839 case MSR_MTRRfix4K_F0000:
840 case MSR_MTRRfix4K_F8000:
841 case MSR_MTRRdefType:
842 case MSR_IA32_CR_PAT:
843 return true;
844 case 0x2f8:
845 return true;
846 }
847 return false;
848}
849
Marcelo Tosattid6289b92009-06-22 15:27:56 -0300850static bool valid_pat_type(unsigned t)
851{
852 return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
853}
854
855static bool valid_mtrr_type(unsigned t)
856{
857 return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
858}
859
860static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
861{
862 int i;
863
864 if (!msr_mtrr_valid(msr))
865 return false;
866
867 if (msr == MSR_IA32_CR_PAT) {
868 for (i = 0; i < 8; i++)
869 if (!valid_pat_type((data >> (i * 8)) & 0xff))
870 return false;
871 return true;
872 } else if (msr == MSR_MTRRdefType) {
873 if (data & ~0xcff)
874 return false;
875 return valid_mtrr_type(data & 0xff);
876 } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
877 for (i = 0; i < 8 ; i++)
878 if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
879 return false;
880 return true;
881 }
882
883 /* variable MTRRs */
884 return valid_mtrr_type(data & 0xff);
885}
886
Avi Kivity9ba075a2008-05-26 20:06:35 +0300887static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
888{
Sheng Yang0bed3b52008-10-09 16:01:54 +0800889 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
890
Marcelo Tosattid6289b92009-06-22 15:27:56 -0300891 if (!mtrr_valid(vcpu, msr, data))
Avi Kivity9ba075a2008-05-26 20:06:35 +0300892 return 1;
893
Sheng Yang0bed3b52008-10-09 16:01:54 +0800894 if (msr == MSR_MTRRdefType) {
895 vcpu->arch.mtrr_state.def_type = data;
896 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
897 } else if (msr == MSR_MTRRfix64K_00000)
898 p[0] = data;
899 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
900 p[1 + msr - MSR_MTRRfix16K_80000] = data;
901 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
902 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
903 else if (msr == MSR_IA32_CR_PAT)
904 vcpu->arch.pat = data;
905 else { /* Variable MTRRs */
906 int idx, is_mtrr_mask;
907 u64 *pt;
908
909 idx = (msr - 0x200) / 2;
910 is_mtrr_mask = msr - 0x200 - 2 * idx;
911 if (!is_mtrr_mask)
912 pt =
913 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
914 else
915 pt =
916 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
917 *pt = data;
918 }
919
920 kvm_mmu_reset_context(vcpu);
Avi Kivity9ba075a2008-05-26 20:06:35 +0300921 return 0;
922}
Carsten Otte15c4a642007-10-30 18:44:17 +0100923
Huang Ying890ca9a2009-05-11 16:48:15 +0800924static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
925{
926 u64 mcg_cap = vcpu->arch.mcg_cap;
927 unsigned bank_num = mcg_cap & 0xff;
928
929 switch (msr) {
930 case MSR_IA32_MCG_STATUS:
931 vcpu->arch.mcg_status = data;
932 break;
933 case MSR_IA32_MCG_CTL:
934 if (!(mcg_cap & MCG_CTL_P))
935 return 1;
936 if (data != 0 && data != ~(u64)0)
937 return -1;
938 vcpu->arch.mcg_ctl = data;
939 break;
940 default:
941 if (msr >= MSR_IA32_MC0_CTL &&
942 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
943 u32 offset = msr - MSR_IA32_MC0_CTL;
Andre Przywara114be422010-03-24 17:46:42 +0100944 /* Only 0 or all 1s can be written to IA32_MCi_CTL.
 945 * Some Linux kernels, though, clear bit 10 in bank 4 to
 946 * work around a BIOS/GART TBL issue on AMD K8s; ignore
 947 * this to avoid an uncaught #GP in the guest.
948 */
Huang Ying890ca9a2009-05-11 16:48:15 +0800949 if ((offset & 0x3) == 0 &&
Andre Przywara114be422010-03-24 17:46:42 +0100950 data != 0 && (data | (1 << 10)) != ~(u64)0)
Huang Ying890ca9a2009-05-11 16:48:15 +0800951 return -1;
952 vcpu->arch.mce_banks[offset] = data;
953 break;
954 }
955 return 1;
956 }
957 return 0;
958}
959
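/*
 * Handle a write to the MSR registered through KVM_XEN_HVM_CONFIG: the low
 * bits of @data select a page of the userspace-provided hypercall blob,
 * which is copied into guest memory at the page-aligned address in @data.
 */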
Ed Swierkffde22a2009-10-15 15:21:43 -0700960static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
961{
962 struct kvm *kvm = vcpu->kvm;
963 int lm = is_long_mode(vcpu);
964 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
965 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
966 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
967 : kvm->arch.xen_hvm_config.blob_size_32;
968 u32 page_num = data & ~PAGE_MASK;
969 u64 page_addr = data & PAGE_MASK;
970 u8 *page;
971 int r;
972
973 r = -E2BIG;
974 if (page_num >= blob_size)
975 goto out;
976 r = -ENOMEM;
977 page = kzalloc(PAGE_SIZE, GFP_KERNEL);
978 if (!page)
979 goto out;
980 r = -EFAULT;
981 if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
982 goto out_free;
983 if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
984 goto out_free;
985 r = 0;
986out_free:
987 kfree(page);
988out:
989 return r;
990}
991
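/*
 * Hyper-V synthetic MSRs.  The partition-wide ones (guest OS ID, hypercall
 * page) live in struct kvm and are written under kvm->lock; the remaining
 * ones are per-vcpu state.
 */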
Gleb Natapov55cd8e52010-01-17 15:51:22 +0200992static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
993{
994 return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
995}
996
997static bool kvm_hv_msr_partition_wide(u32 msr)
998{
999 bool r = false;
1000 switch (msr) {
1001 case HV_X64_MSR_GUEST_OS_ID:
1002 case HV_X64_MSR_HYPERCALL:
1003 r = true;
1004 break;
1005 }
1006
1007 return r;
1008}
1009
1010static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1011{
1012 struct kvm *kvm = vcpu->kvm;
1013
1014 switch (msr) {
1015 case HV_X64_MSR_GUEST_OS_ID:
1016 kvm->arch.hv_guest_os_id = data;
1017 /* setting guest os id to zero disables hypercall page */
1018 if (!kvm->arch.hv_guest_os_id)
1019 kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1020 break;
1021 case HV_X64_MSR_HYPERCALL: {
1022 u64 gfn;
1023 unsigned long addr;
1024 u8 instructions[4];
1025
1026 /* if guest os id is not set hypercall should remain disabled */
1027 if (!kvm->arch.hv_guest_os_id)
1028 break;
1029 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1030 kvm->arch.hv_hypercall = data;
1031 break;
1032 }
1033 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1034 addr = gfn_to_hva(kvm, gfn);
1035 if (kvm_is_error_hva(addr))
1036 return 1;
1037 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1038 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1039 if (copy_to_user((void __user *)addr, instructions, 4))
1040 return 1;
1041 kvm->arch.hv_hypercall = data;
1042 break;
1043 }
1044 default:
1045 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1046 "data 0x%llx\n", msr, data);
1047 return 1;
1048 }
1049 return 0;
1050}
1051
1052static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1053{
Gleb Natapov10388a02010-01-17 15:51:23 +02001054 switch (msr) {
1055 case HV_X64_MSR_APIC_ASSIST_PAGE: {
1056 unsigned long addr;
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001057
Gleb Natapov10388a02010-01-17 15:51:23 +02001058 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1059 vcpu->arch.hv_vapic = data;
1060 break;
1061 }
1062 addr = gfn_to_hva(vcpu->kvm, data >>
1063 HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1064 if (kvm_is_error_hva(addr))
1065 return 1;
1066 if (clear_user((void __user *)addr, PAGE_SIZE))
1067 return 1;
1068 vcpu->arch.hv_vapic = data;
1069 break;
1070 }
1071 case HV_X64_MSR_EOI:
1072 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1073 case HV_X64_MSR_ICR:
1074 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1075 case HV_X64_MSR_TPR:
1076 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1077 default:
1078 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1079 "data 0x%llx\n", msr, data);
1080 return 1;
1081 }
1082
1083 return 0;
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001084}
1085
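/*
 * Handle a guest WRMSR for MSRs emulated in vendor-independent code.
 * Returns 0 on success; unknown MSRs either return 1 (so the caller can
 * fail the access) or are silently ignored when the ignore_msrs module
 * parameter is set.
 */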
Carsten Otte15c4a642007-10-30 18:44:17 +01001086int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1087{
1088 switch (msr) {
Carsten Otte15c4a642007-10-30 18:44:17 +01001089 case MSR_EFER:
1090 set_efer(vcpu, data);
1091 break;
Andre Przywara8f1589d2009-06-24 12:44:33 +02001092 case MSR_K7_HWCR:
1093 data &= ~(u64)0x40; /* ignore flush filter disable */
1094 if (data != 0) {
1095 pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1096 data);
1097 return 1;
1098 }
Carsten Otte15c4a642007-10-30 18:44:17 +01001099 break;
Andre Przywaraf7c6d142009-07-02 15:04:14 +02001100 case MSR_FAM10H_MMIO_CONF_BASE:
1101 if (data != 0) {
1102 pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1103 "0x%llx\n", data);
1104 return 1;
1105 }
Carsten Otte15c4a642007-10-30 18:44:17 +01001106 break;
Andre Przywarac323c0e2009-06-24 15:37:05 +02001107 case MSR_AMD64_NB_CFG:
Joerg Roedelc7ac6792008-02-11 20:28:27 +01001108 break;
Alexander Grafb5e2fec2008-07-22 08:00:45 +02001109 case MSR_IA32_DEBUGCTLMSR:
1110 if (!data) {
1111 /* We support the non-activated case already */
1112 break;
1113 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1114 /* Values other than LBR and BTF are vendor-specific,
1115 thus reserved and should throw a #GP */
1116 return 1;
1117 }
1118 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1119 __func__, data);
1120 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001121 case MSR_IA32_UCODE_REV:
1122 case MSR_IA32_UCODE_WRITE:
Avi Kivity61a6bd62008-12-29 17:32:28 +02001123 case MSR_VM_HSAVE_PA:
Andre Przywara6098ca92009-07-03 16:00:14 +02001124 case MSR_AMD64_PATCH_LOADER:
Carsten Otte15c4a642007-10-30 18:44:17 +01001125 break;
Avi Kivity9ba075a2008-05-26 20:06:35 +03001126 case 0x200 ... 0x2ff:
1127 return set_msr_mtrr(vcpu, msr, data);
Carsten Otte15c4a642007-10-30 18:44:17 +01001128 case MSR_IA32_APICBASE:
1129 kvm_set_apic_base(vcpu, data);
1130 break;
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001131 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1132 return kvm_x2apic_msr_write(vcpu, msr, data);
Carsten Otte15c4a642007-10-30 18:44:17 +01001133 case MSR_IA32_MISC_ENABLE:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001134 vcpu->arch.ia32_misc_enable_msr = data;
Carsten Otte15c4a642007-10-30 18:44:17 +01001135 break;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001136 case MSR_KVM_WALL_CLOCK:
1137 vcpu->kvm->arch.wall_clock = data;
1138 kvm_write_wall_clock(vcpu->kvm, data);
1139 break;
1140 case MSR_KVM_SYSTEM_TIME: {
1141 if (vcpu->arch.time_page) {
1142 kvm_release_page_dirty(vcpu->arch.time_page);
1143 vcpu->arch.time_page = NULL;
1144 }
1145
1146 vcpu->arch.time = data;
1147
1148 /* we verify if the enable bit is set... */
1149 if (!(data & 1))
1150 break;
1151
1152 /* ...but clean it before doing the actual write */
1153 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1154
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001155 vcpu->arch.time_page =
1156 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001157
1158 if (is_error_page(vcpu->arch.time_page)) {
1159 kvm_release_page_clean(vcpu->arch.time_page);
1160 vcpu->arch.time_page = NULL;
1161 }
1162
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001163 kvm_request_guest_time_update(vcpu);
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001164 break;
1165 }
Huang Ying890ca9a2009-05-11 16:48:15 +08001166 case MSR_IA32_MCG_CTL:
1167 case MSR_IA32_MCG_STATUS:
1168 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1169 return set_msr_mce(vcpu, msr, data);
Andre Przywara71db6022009-06-12 22:01:29 +02001170
1171 /* Performance counters are not protected by a CPUID bit,
1172 * so we should check all of them in the generic path for the sake of
1173 * cross-vendor migration.
1174 * Writing a zero into the event select MSRs disables them,
1175 * which we perfectly emulate ;-). Any other value should be at least
1176 * reported; some guests depend on them.
1177 */
1178 case MSR_P6_EVNTSEL0:
1179 case MSR_P6_EVNTSEL1:
1180 case MSR_K7_EVNTSEL0:
1181 case MSR_K7_EVNTSEL1:
1182 case MSR_K7_EVNTSEL2:
1183 case MSR_K7_EVNTSEL3:
1184 if (data != 0)
1185 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1186 "0x%x data 0x%llx\n", msr, data);
1187 break;
1188 /* at least RHEL 4 unconditionally writes to the perfctr registers,
1189 * so we ignore writes to make it happy.
1190 */
1191 case MSR_P6_PERFCTR0:
1192 case MSR_P6_PERFCTR1:
1193 case MSR_K7_PERFCTR0:
1194 case MSR_K7_PERFCTR1:
1195 case MSR_K7_PERFCTR2:
1196 case MSR_K7_PERFCTR3:
1197 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1198 "0x%x data 0x%llx\n", msr, data);
1199 break;
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001200 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1201 if (kvm_hv_msr_partition_wide(msr)) {
1202 int r;
1203 mutex_lock(&vcpu->kvm->lock);
1204 r = set_msr_hyperv_pw(vcpu, msr, data);
1205 mutex_unlock(&vcpu->kvm->lock);
1206 return r;
1207 } else
1208 return set_msr_hyperv(vcpu, msr, data);
1209 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001210 default:
Ed Swierkffde22a2009-10-15 15:21:43 -07001211 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1212 return xen_hvm_config(vcpu, data);
Andre Przywaraed85c062009-06-25 12:36:49 +02001213 if (!ignore_msrs) {
1214 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1215 msr, data);
1216 return 1;
1217 } else {
1218 pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1219 msr, data);
1220 break;
1221 }
Carsten Otte15c4a642007-10-30 18:44:17 +01001222 }
1223 return 0;
1224}
1225EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1226
1227
1228/*
1229 * Reads an msr value (of 'msr_index') into 'pdata'.
1230 * Returns 0 on success, non-0 otherwise.
1231 * Assumes vcpu_load() was already called.
1232 */
1233int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1234{
1235 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1236}
1237
Avi Kivity9ba075a2008-05-26 20:06:35 +03001238static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1239{
Sheng Yang0bed3b52008-10-09 16:01:54 +08001240 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1241
Avi Kivity9ba075a2008-05-26 20:06:35 +03001242 if (!msr_mtrr_valid(msr))
1243 return 1;
1244
Sheng Yang0bed3b52008-10-09 16:01:54 +08001245 if (msr == MSR_MTRRdefType)
1246 *pdata = vcpu->arch.mtrr_state.def_type +
1247 (vcpu->arch.mtrr_state.enabled << 10);
1248 else if (msr == MSR_MTRRfix64K_00000)
1249 *pdata = p[0];
1250 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1251 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1252 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1253 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1254 else if (msr == MSR_IA32_CR_PAT)
1255 *pdata = vcpu->arch.pat;
1256 else { /* Variable MTRRs */
1257 int idx, is_mtrr_mask;
1258 u64 *pt;
1259
1260 idx = (msr - 0x200) / 2;
1261 is_mtrr_mask = msr - 0x200 - 2 * idx;
1262 if (!is_mtrr_mask)
1263 pt =
1264 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1265 else
1266 pt =
1267 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1268 *pdata = *pt;
1269 }
1270
Avi Kivity9ba075a2008-05-26 20:06:35 +03001271 return 0;
1272}
1273
Huang Ying890ca9a2009-05-11 16:48:15 +08001274static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1275{
1276 u64 data;
1277 u64 mcg_cap = vcpu->arch.mcg_cap;
1278 unsigned bank_num = mcg_cap & 0xff;
1279
1280 switch (msr) {
1281 case MSR_IA32_P5_MC_ADDR:
1282 case MSR_IA32_P5_MC_TYPE:
1283 data = 0;
1284 break;
1285 case MSR_IA32_MCG_CAP:
1286 data = vcpu->arch.mcg_cap;
1287 break;
1288 case MSR_IA32_MCG_CTL:
1289 if (!(mcg_cap & MCG_CTL_P))
1290 return 1;
1291 data = vcpu->arch.mcg_ctl;
1292 break;
1293 case MSR_IA32_MCG_STATUS:
1294 data = vcpu->arch.mcg_status;
1295 break;
1296 default:
1297 if (msr >= MSR_IA32_MC0_CTL &&
1298 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1299 u32 offset = msr - MSR_IA32_MC0_CTL;
1300 data = vcpu->arch.mce_banks[offset];
1301 break;
1302 }
1303 return 1;
1304 }
1305 *pdata = data;
1306 return 0;
1307}
1308
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001309static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1310{
1311 u64 data = 0;
1312 struct kvm *kvm = vcpu->kvm;
1313
1314 switch (msr) {
1315 case HV_X64_MSR_GUEST_OS_ID:
1316 data = kvm->arch.hv_guest_os_id;
1317 break;
1318 case HV_X64_MSR_HYPERCALL:
1319 data = kvm->arch.hv_hypercall;
1320 break;
1321 default:
1322 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1323 return 1;
1324 }
1325
1326 *pdata = data;
1327 return 0;
1328}
1329
1330static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1331{
1332 u64 data = 0;
1333
1334 switch (msr) {
1335 case HV_X64_MSR_VP_INDEX: {
1336 int r;
1337 struct kvm_vcpu *v;
1338 kvm_for_each_vcpu(r, v, vcpu->kvm)
1339 if (v == vcpu)
1340 data = r;
1341 break;
1342 }
Gleb Natapov10388a02010-01-17 15:51:23 +02001343 case HV_X64_MSR_EOI:
1344 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1345 case HV_X64_MSR_ICR:
1346 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1347 case HV_X64_MSR_TPR:
1348 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001349 default:
1350 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1351 return 1;
1352 }
1353 *pdata = data;
1354 return 0;
1355}
1356
Carsten Otte15c4a642007-10-30 18:44:17 +01001357int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1358{
1359 u64 data;
1360
1361 switch (msr) {
Carsten Otte15c4a642007-10-30 18:44:17 +01001362 case MSR_IA32_PLATFORM_ID:
Carsten Otte15c4a642007-10-30 18:44:17 +01001363 case MSR_IA32_UCODE_REV:
Carsten Otte15c4a642007-10-30 18:44:17 +01001364 case MSR_IA32_EBL_CR_POWERON:
Alexander Grafb5e2fec2008-07-22 08:00:45 +02001365 case MSR_IA32_DEBUGCTLMSR:
1366 case MSR_IA32_LASTBRANCHFROMIP:
1367 case MSR_IA32_LASTBRANCHTOIP:
1368 case MSR_IA32_LASTINTFROMIP:
1369 case MSR_IA32_LASTINTTOIP:
Jaswinder Singh Rajput60af2ec2009-05-14 11:00:10 +05301370 case MSR_K8_SYSCFG:
1371 case MSR_K7_HWCR:
Avi Kivity61a6bd62008-12-29 17:32:28 +02001372 case MSR_VM_HSAVE_PA:
Amit Shah1f3ee612009-06-30 16:24:28 +05301373 case MSR_P6_PERFCTR0:
1374 case MSR_P6_PERFCTR1:
Amit Shah7fe29e02009-03-20 12:39:00 +05301375 case MSR_P6_EVNTSEL0:
1376 case MSR_P6_EVNTSEL1:
Amit Shah9e699622009-06-15 13:25:34 +05301377 case MSR_K7_EVNTSEL0:
Amit Shah1f3ee612009-06-30 16:24:28 +05301378 case MSR_K7_PERFCTR0:
Andre Przywara1fdbd482009-06-24 12:44:34 +02001379 case MSR_K8_INT_PENDING_MSG:
Andre Przywarac323c0e2009-06-24 15:37:05 +02001380 case MSR_AMD64_NB_CFG:
Andre Przywaraf7c6d142009-07-02 15:04:14 +02001381 case MSR_FAM10H_MMIO_CONF_BASE:
Carsten Otte15c4a642007-10-30 18:44:17 +01001382 data = 0;
1383 break;
Avi Kivity9ba075a2008-05-26 20:06:35 +03001384 case MSR_MTRRcap:
1385 data = 0x500 | KVM_NR_VAR_MTRR;
1386 break;
1387 case 0x200 ... 0x2ff:
1388 return get_msr_mtrr(vcpu, msr, pdata);
Carsten Otte15c4a642007-10-30 18:44:17 +01001389 case 0xcd: /* fsb frequency */
1390 data = 3;
1391 break;
1392 case MSR_IA32_APICBASE:
1393 data = kvm_get_apic_base(vcpu);
1394 break;
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001395 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1396 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1397 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001398 case MSR_IA32_MISC_ENABLE:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001399 data = vcpu->arch.ia32_misc_enable_msr;
Carsten Otte15c4a642007-10-30 18:44:17 +01001400 break;
Alexander Graf847f0ad2008-02-21 12:11:01 +01001401 case MSR_IA32_PERF_STATUS:
1402 /* TSC increment by tick */
1403 data = 1000ULL;
1404 /* CPU multiplier */
1405 data |= (((uint64_t)4ULL) << 40);
1406 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001407 case MSR_EFER:
Avi Kivityf6801df2010-01-21 15:31:50 +02001408 data = vcpu->arch.efer;
Carsten Otte15c4a642007-10-30 18:44:17 +01001409 break;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001410 case MSR_KVM_WALL_CLOCK:
1411 data = vcpu->kvm->arch.wall_clock;
1412 break;
1413 case MSR_KVM_SYSTEM_TIME:
1414 data = vcpu->arch.time;
1415 break;
Huang Ying890ca9a2009-05-11 16:48:15 +08001416 case MSR_IA32_P5_MC_ADDR:
1417 case MSR_IA32_P5_MC_TYPE:
1418 case MSR_IA32_MCG_CAP:
1419 case MSR_IA32_MCG_CTL:
1420 case MSR_IA32_MCG_STATUS:
1421 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1422 return get_msr_mce(vcpu, msr, pdata);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001423 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1424 if (kvm_hv_msr_partition_wide(msr)) {
1425 int r;
1426 mutex_lock(&vcpu->kvm->lock);
1427 r = get_msr_hyperv_pw(vcpu, msr, pdata);
1428 mutex_unlock(&vcpu->kvm->lock);
1429 return r;
1430 } else
1431 return get_msr_hyperv(vcpu, msr, pdata);
1432 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001433 default:
Andre Przywaraed85c062009-06-25 12:36:49 +02001434 if (!ignore_msrs) {
1435 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1436 return 1;
1437 } else {
1438 pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
1439 data = 0;
1440 }
1441 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001442 }
1443 *pdata = data;
1444 return 0;
1445}
1446EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1447
Carsten Otte313a3dc2007-10-11 19:16:52 +02001448/*
1449 * Read or write a bunch of msrs. All parameters are kernel addresses.
1450 *
1451 * @return number of msrs set successfully.
1452 */
1453static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1454 struct kvm_msr_entry *entries,
1455 int (*do_msr)(struct kvm_vcpu *vcpu,
1456 unsigned index, u64 *data))
1457{
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001458 int i, idx;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001459
1460 vcpu_load(vcpu);
1461
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001462 idx = srcu_read_lock(&vcpu->kvm->srcu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001463 for (i = 0; i < msrs->nmsrs; ++i)
1464 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1465 break;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001466 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001467
1468 vcpu_put(vcpu);
1469
1470 return i;
1471}
1472
1473/*
1474 * Read or write a bunch of msrs. Parameters are user addresses.
1475 *
1476 * @return number of msrs set successfully.
1477 */
1478static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1479 int (*do_msr)(struct kvm_vcpu *vcpu,
1480 unsigned index, u64 *data),
1481 int writeback)
1482{
1483 struct kvm_msrs msrs;
1484 struct kvm_msr_entry *entries;
1485 int r, n;
1486 unsigned size;
1487
1488 r = -EFAULT;
1489 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1490 goto out;
1491
1492 r = -E2BIG;
1493 if (msrs.nmsrs >= MAX_IO_MSRS)
1494 goto out;
1495
1496 r = -ENOMEM;
1497 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1498 entries = vmalloc(size);
1499 if (!entries)
1500 goto out;
1501
1502 r = -EFAULT;
1503 if (copy_from_user(entries, user_msrs->entries, size))
1504 goto out_free;
1505
1506 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1507 if (r < 0)
1508 goto out_free;
1509
1510 r = -EFAULT;
1511 if (writeback && copy_to_user(user_msrs->entries, entries, size))
1512 goto out_free;
1513
1514 r = n;
1515
1516out_free:
1517 vfree(entries);
1518out:
1519 return r;
1520}
1521
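/*
 * Illustrative userspace counterpart of the msr_io() path above (a sketch,
 * not part of this file): kvm_fd/vcpu_fd and the MSR number are assumed,
 * and the structures come from <linux/kvm.h>.  KVM_GET_MSRS returns the
 * number of entries processed, mirroring __msr_io()'s stop-on-first-failure
 * loop.
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} req = { .hdr.nmsrs = 1 };
 *
 *	req.entries[0].index = 0xc0000080;	  (MSR_EFER)
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) == 1)
 *		printf("EFER = 0x%llx\n",
 *		       (unsigned long long)req.entries[0].data);
 */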
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001522int kvm_dev_ioctl_check_extension(long ext)
1523{
1524 int r;
1525
1526 switch (ext) {
1527 case KVM_CAP_IRQCHIP:
1528 case KVM_CAP_HLT:
1529 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001530 case KVM_CAP_SET_TSS_ADDR:
Dan Kenigsberg07716712007-11-21 17:10:04 +02001531 case KVM_CAP_EXT_CPUID:
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001532 case KVM_CAP_CLOCKSOURCE:
Sheng Yang78376992008-01-28 05:10:22 +08001533 case KVM_CAP_PIT:
Marcelo Tosattia28e4f52008-02-22 12:21:36 -05001534 case KVM_CAP_NOP_IO_DELAY:
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001535 case KVM_CAP_MP_STATE:
Avi Kivityed848622008-07-29 11:30:57 +03001536 case KVM_CAP_SYNC_MMU:
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02001537 case KVM_CAP_REINJECT_CONTROL:
Gleb Natapov49256632009-02-04 17:28:14 +02001538 case KVM_CAP_IRQ_INJECT_STATUS:
Sheng Yange56d5322009-03-12 21:45:39 +08001539 case KVM_CAP_ASSIGN_DEV_IRQ:
Gregory Haskins721eecb2009-05-20 10:30:49 -04001540 case KVM_CAP_IRQFD:
Gregory Haskinsd34e6b12009-07-07 17:08:49 -04001541 case KVM_CAP_IOEVENTFD:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02001542 case KVM_CAP_PIT2:
Beth Kone9f42752009-07-07 11:50:38 -04001543 case KVM_CAP_PIT_STATE2:
Sheng Yangb927a3c2009-07-21 10:42:48 +08001544 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
Ed Swierkffde22a2009-10-15 15:21:43 -07001545 case KVM_CAP_XEN_HVM:
Glauber Costaafbcf7a2009-10-16 15:28:36 -04001546 case KVM_CAP_ADJUST_CLOCK:
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001547 case KVM_CAP_VCPU_EVENTS:
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001548 case KVM_CAP_HYPERV:
Gleb Natapov10388a02010-01-17 15:51:23 +02001549 case KVM_CAP_HYPERV_VAPIC:
Gleb Natapovc25bc162010-01-17 15:51:24 +02001550 case KVM_CAP_HYPERV_SPIN:
Zhai, Edwinab9f4ec2010-01-29 14:38:44 +08001551 case KVM_CAP_PCI_SEGMENT:
Jan Kiszkad2be1652010-02-23 17:47:57 +01001552 case KVM_CAP_X86_ROBUST_SINGLESTEP:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001553 r = 1;
1554 break;
Laurent Vivier542472b2008-05-30 16:05:55 +02001555 case KVM_CAP_COALESCED_MMIO:
1556 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1557 break;
Avi Kivity774ead32007-12-26 13:57:04 +02001558 case KVM_CAP_VAPIC:
1559 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1560 break;
Avi Kivityf7252302008-02-20 11:53:16 +02001561 case KVM_CAP_NR_VCPUS:
1562 r = KVM_MAX_VCPUS;
1563 break;
Avi Kivitya988b912008-02-20 11:59:20 +02001564 case KVM_CAP_NR_MEMSLOTS:
1565 r = KVM_MEMORY_SLOTS;
1566 break;
Marcelo Tosattia68a6a72009-10-01 19:28:39 -03001567 case KVM_CAP_PV_MMU: /* obsolete */
1568 r = 0;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05001569 break;
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001570 case KVM_CAP_IOMMU:
Joerg Roedel19de40a2008-12-03 14:43:34 +01001571 r = iommu_found();
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001572 break;
Huang Ying890ca9a2009-05-11 16:48:15 +08001573 case KVM_CAP_MCE:
1574 r = KVM_MAX_MCE_BANKS;
1575 break;
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001576 default:
1577 r = 0;
1578 break;
1579 }
1580 return r;
1581
1582}
1583
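/*
 * kvm_dev_ioctl_check_extension() above backs KVM_CHECK_EXTENSION.  A
 * minimal userspace probe might look like this (sketch; kvm_fd is an open
 * /dev/kvm file descriptor):
 *
 *	int has_irqchip = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
 *	int max_vcpus   = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
 *
 * Boolean capabilities report 0 or 1; numeric ones such as KVM_CAP_NR_VCPUS,
 * KVM_CAP_MCE and KVM_CAP_COALESCED_MMIO report the values computed above.
 */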
Carsten Otte043405e2007-10-10 17:16:19 +02001584long kvm_arch_dev_ioctl(struct file *filp,
1585 unsigned int ioctl, unsigned long arg)
1586{
1587 void __user *argp = (void __user *)arg;
1588 long r;
1589
1590 switch (ioctl) {
1591 case KVM_GET_MSR_INDEX_LIST: {
1592 struct kvm_msr_list __user *user_msr_list = argp;
1593 struct kvm_msr_list msr_list;
1594 unsigned n;
1595
1596 r = -EFAULT;
1597 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1598 goto out;
1599 n = msr_list.nmsrs;
1600 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1601 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1602 goto out;
1603 r = -E2BIG;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001604 if (n < msr_list.nmsrs)
Carsten Otte043405e2007-10-10 17:16:19 +02001605 goto out;
1606 r = -EFAULT;
1607 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1608 num_msrs_to_save * sizeof(u32)))
1609 goto out;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001610 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
Carsten Otte043405e2007-10-10 17:16:19 +02001611 &emulated_msrs,
1612 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1613 goto out;
1614 r = 0;
1615 break;
1616 }
Avi Kivity674eea02008-02-11 18:37:23 +02001617 case KVM_GET_SUPPORTED_CPUID: {
1618 struct kvm_cpuid2 __user *cpuid_arg = argp;
1619 struct kvm_cpuid2 cpuid;
1620
1621 r = -EFAULT;
1622 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1623 goto out;
1624 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
Amit Shah19355472009-01-14 16:56:00 +00001625 cpuid_arg->entries);
Avi Kivity674eea02008-02-11 18:37:23 +02001626 if (r)
1627 goto out;
1628
1629 r = -EFAULT;
1630 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1631 goto out;
1632 r = 0;
1633 break;
1634 }
Huang Ying890ca9a2009-05-11 16:48:15 +08001635 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1636 u64 mce_cap;
1637
1638 mce_cap = KVM_MCE_CAP_SUPPORTED;
1639 r = -EFAULT;
1640 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1641 goto out;
1642 r = 0;
1643 break;
1644 }
Carsten Otte043405e2007-10-10 17:16:19 +02001645 default:
1646 r = -EINVAL;
1647 }
1648out:
1649 return r;
1650}
1651
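/*
 * The -E2BIG handling in KVM_GET_MSR_INDEX_LIST above implies the usual
 * two-call pattern in userspace (illustrative sketch; kvm_fd assumed):
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 }, *list;
 *
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);	  (fails with E2BIG,
 *							   but fills in nmsrs)
 *	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);	  (indices[] now valid)
 */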
Carsten Otte313a3dc2007-10-11 19:16:52 +02001652void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1653{
1654 kvm_x86_ops->vcpu_load(vcpu, cpu);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10001655 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1656 unsigned long khz = cpufreq_quick_get(cpu);
1657 if (!khz)
1658 khz = tsc_khz;
1659 per_cpu(cpu_tsc_khz, cpu) = khz;
1660 }
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001661 kvm_request_guest_time_update(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001662}
1663
1664void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1665{
Amit Shah9327fd12007-11-15 18:38:46 +02001666 kvm_put_guest_fpu(vcpu);
Avi Kivity02daab22009-12-30 12:40:26 +02001667 kvm_x86_ops->vcpu_put(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001668}
1669
Dan Kenigsberg07716712007-11-21 17:10:04 +02001670static int is_efer_nx(void)
Carsten Otte313a3dc2007-10-11 19:16:52 +02001671{
Avi Kivitye286e862009-05-03 18:50:55 +03001672 unsigned long long efer = 0;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001673
Avi Kivitye286e862009-05-03 18:50:55 +03001674 rdmsrl_safe(MSR_EFER, &efer);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001675 return efer & EFER_NX;
1676}
1677
1678static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1679{
1680 int i;
1681 struct kvm_cpuid_entry2 *e, *entry;
1682
Carsten Otte313a3dc2007-10-11 19:16:52 +02001683 entry = NULL;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001684 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1685 e = &vcpu->arch.cpuid_entries[i];
Carsten Otte313a3dc2007-10-11 19:16:52 +02001686 if (e->function == 0x80000001) {
1687 entry = e;
1688 break;
1689 }
1690 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02001691 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
Carsten Otte313a3dc2007-10-11 19:16:52 +02001692 entry->edx &= ~(1 << 20);
1693 printk(KERN_INFO "kvm: guest NX capability removed\n");
1694 }
1695}
1696
Dan Kenigsberg07716712007-11-21 17:10:04 +02001697/* when an old userspace process fills a new kernel module */
Carsten Otte313a3dc2007-10-11 19:16:52 +02001698static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1699 struct kvm_cpuid *cpuid,
1700 struct kvm_cpuid_entry __user *entries)
1701{
Dan Kenigsberg07716712007-11-21 17:10:04 +02001702 int r, i;
1703 struct kvm_cpuid_entry *cpuid_entries;
1704
1705 r = -E2BIG;
1706 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1707 goto out;
1708 r = -ENOMEM;
1709 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1710 if (!cpuid_entries)
1711 goto out;
1712 r = -EFAULT;
1713 if (copy_from_user(cpuid_entries, entries,
1714 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1715 goto out_free;
1716 for (i = 0; i < cpuid->nent; i++) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001717 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1718 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1719 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1720 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1721 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1722 vcpu->arch.cpuid_entries[i].index = 0;
1723 vcpu->arch.cpuid_entries[i].flags = 0;
1724 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1725 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1726 vcpu->arch.cpuid_entries[i].padding[2] = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001727 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001728 vcpu->arch.cpuid_nent = cpuid->nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001729 cpuid_fix_nx_cap(vcpu);
1730 r = 0;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001731 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001732 kvm_x86_ops->cpuid_update(vcpu);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001733
1734out_free:
1735 vfree(cpuid_entries);
1736out:
1737 return r;
1738}
1739
1740static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001741 struct kvm_cpuid2 *cpuid,
1742 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001743{
Carsten Otte313a3dc2007-10-11 19:16:52 +02001744 int r;
1745
1746 r = -E2BIG;
1747 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1748 goto out;
1749 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001750 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
Dan Kenigsberg07716712007-11-21 17:10:04 +02001751 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02001752 goto out;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001753 vcpu->arch.cpuid_nent = cpuid->nent;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001754 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001755 kvm_x86_ops->cpuid_update(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001756 return 0;
1757
1758out:
1759 return r;
1760}
1761
Dan Kenigsberg07716712007-11-21 17:10:04 +02001762static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001763 struct kvm_cpuid2 *cpuid,
1764 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001765{
1766 int r;
1767
1768 r = -E2BIG;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001769 if (cpuid->nent < vcpu->arch.cpuid_nent)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001770 goto out;
1771 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001772 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00001773 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02001774 goto out;
1775 return 0;
1776
1777out:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001778 cpuid->nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001779 return r;
1780}
1781
Dan Kenigsberg07716712007-11-21 17:10:04 +02001782static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
Amit Shah19355472009-01-14 16:56:00 +00001783 u32 index)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001784{
1785 entry->function = function;
1786 entry->index = index;
1787 cpuid_count(entry->function, entry->index,
Amit Shah19355472009-01-14 16:56:00 +00001788 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001789 entry->flags = 0;
1790}
1791
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001792#define F(x) bit(X86_FEATURE_##x)
1793
Dan Kenigsberg07716712007-11-21 17:10:04 +02001794static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1795 u32 index, int *nent, int maxnent)
1796{
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001797 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001798#ifdef CONFIG_X86_64
Sheng Yang17cc3932010-01-05 19:02:27 +08001799 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
1800 ? F(GBPAGES) : 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001801 unsigned f_lm = F(LM);
1802#else
Sheng Yang17cc3932010-01-05 19:02:27 +08001803 unsigned f_gbpages = 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001804 unsigned f_lm = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001805#endif
Sheng Yang4e47c7a2009-12-18 16:48:47 +08001806 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001807
1808 /* cpuid 1.edx */
1809 const u32 kvm_supported_word0_x86_features =
1810 F(FPU) | F(VME) | F(DE) | F(PSE) |
1811 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1812 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1813 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1814 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1815 0 /* Reserved, DS, ACPI */ | F(MMX) |
1816 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1817 0 /* HTT, TM, Reserved, PBE */;
1818 /* cpuid 0x80000001.edx */
1819 const u32 kvm_supported_word1_x86_features =
1820 F(FPU) | F(VME) | F(DE) | F(PSE) |
1821 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1822 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1823 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1824 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1825 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
Sheng Yang4e47c7a2009-12-18 16:48:47 +08001826 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001827 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1828 /* cpuid 1.ecx */
1829 const u32 kvm_supported_word4_x86_features =
Avi Kivityd149c732009-05-10 14:41:56 +03001830 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1831 0 /* DS-CPL, VMX, SMX, EST */ |
1832 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1833 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1834 0 /* Reserved, DCA */ | F(XMM4_1) |
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001835 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
Avi Kivityd149c732009-05-10 14:41:56 +03001836 0 /* Reserved, XSAVE, OSXSAVE */;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001837 /* cpuid 0x80000001.ecx */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001838 const u32 kvm_supported_word6_x86_features =
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001839 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1840 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1841 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1842 0 /* SKINIT */ | 0 /* WDT */;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001843
Amit Shah19355472009-01-14 16:56:00 +00001844 /* all calls to cpuid_count() should be made on the same cpu */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001845 get_cpu();
1846 do_cpuid_1_ent(entry, function, index);
1847 ++*nent;
1848
1849 switch (function) {
1850 case 0:
1851 entry->eax = min(entry->eax, (u32)0xb);
1852 break;
1853 case 1:
1854 entry->edx &= kvm_supported_word0_x86_features;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001855 entry->ecx &= kvm_supported_word4_x86_features;
Gleb Natapov0d1de2d2009-07-12 16:10:55 +03001856 /* we support x2apic emulation even if host does not support
1857 * it since we emulate x2apic in software */
1858 entry->ecx |= F(X2APIC);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001859 break;
1860 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1861 * may return different values. This forces us to get_cpu() before
1862 * issuing the first command, and also to emulate this annoying behavior
1863 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1864 case 2: {
1865 int t, times = entry->eax & 0xff;
1866
1867 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08001868 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001869 for (t = 1; t < times && *nent < maxnent; ++t) {
1870 do_cpuid_1_ent(&entry[t], function, 0);
1871 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1872 ++*nent;
1873 }
1874 break;
1875 }
1876 /* function 4 and 0xb have additional index. */
1877 case 4: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08001878 int i, cache_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001879
1880 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1881 /* read more entries until cache_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08001882 for (i = 1; *nent < maxnent; ++i) {
1883 cache_type = entry[i - 1].eax & 0x1f;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001884 if (!cache_type)
1885 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08001886 do_cpuid_1_ent(&entry[i], function, i);
1887 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02001888 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1889 ++*nent;
1890 }
1891 break;
1892 }
1893 case 0xb: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08001894 int i, level_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001895
1896 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1897 /* read more entries until level_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08001898 for (i = 1; *nent < maxnent; ++i) {
Nitin A Kamble0853d2c2008-11-05 15:37:36 -08001899 level_type = entry[i - 1].ecx & 0xff00;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001900 if (!level_type)
1901 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08001902 do_cpuid_1_ent(&entry[i], function, i);
1903 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02001904 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1905 ++*nent;
1906 }
1907 break;
1908 }
1909 case 0x80000000:
1910 entry->eax = min(entry->eax, 0x8000001a);
1911 break;
1912 case 0x80000001:
1913 entry->edx &= kvm_supported_word1_x86_features;
1914 entry->ecx &= kvm_supported_word6_x86_features;
1915 break;
1916 }
1917 put_cpu();
1918}
1919
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001920#undef F
1921
Avi Kivity674eea02008-02-11 18:37:23 +02001922static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
Amit Shah19355472009-01-14 16:56:00 +00001923 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001924{
1925 struct kvm_cpuid_entry2 *cpuid_entries;
1926 int limit, nent = 0, r = -E2BIG;
1927 u32 func;
1928
1929 if (cpuid->nent < 1)
1930 goto out;
Avi Kivity6a544352009-10-04 16:45:13 +02001931 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1932 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001933 r = -ENOMEM;
1934 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1935 if (!cpuid_entries)
1936 goto out;
1937
1938 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
1939 limit = cpuid_entries[0].eax;
1940 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1941 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00001942 &nent, cpuid->nent);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001943 r = -E2BIG;
1944 if (nent >= cpuid->nent)
1945 goto out_free;
1946
1947 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1948 limit = cpuid_entries[nent - 1].eax;
1949 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1950 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00001951 &nent, cpuid->nent);
Mark McLoughlincb007642009-05-12 12:36:44 +01001952 r = -E2BIG;
1953 if (nent >= cpuid->nent)
1954 goto out_free;
1955
Dan Kenigsberg07716712007-11-21 17:10:04 +02001956 r = -EFAULT;
1957 if (copy_to_user(entries, cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00001958 nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02001959 goto out_free;
1960 cpuid->nent = nent;
1961 r = 0;
1962
1963out_free:
1964 vfree(cpuid_entries);
1965out:
1966 return r;
1967}
1968
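/*
 * Typical use of the helper above (a sketch, not a prescription): userspace
 * asks the kernel which CPUID leaves it may expose, optionally masks bits,
 * and programs the vcpu with KVM_SET_CPUID2 so the filtered feature words
 * from do_cpuid_ent() become what the guest sees.  NENT, kvm_fd and vcpu_fd
 * are placeholders.
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) +
 *			  NENT * sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = NENT;
 *	if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
 *		ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 */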
Carsten Otte313a3dc2007-10-11 19:16:52 +02001969static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1970 struct kvm_lapic_state *s)
1971{
1972 vcpu_load(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001973 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001974 vcpu_put(vcpu);
1975
1976 return 0;
1977}
1978
1979static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1980 struct kvm_lapic_state *s)
1981{
1982 vcpu_load(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001983 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001984 kvm_apic_post_state_restore(vcpu);
Gleb Natapovcb142eb2009-08-09 15:17:40 +03001985 update_cr8_intercept(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001986 vcpu_put(vcpu);
1987
1988 return 0;
1989}
1990
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08001991static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1992 struct kvm_interrupt *irq)
1993{
1994 if (irq->irq < 0 || irq->irq >= 256)
1995 return -EINVAL;
1996 if (irqchip_in_kernel(vcpu->kvm))
1997 return -ENXIO;
1998 vcpu_load(vcpu);
1999
Gleb Natapov66fd3f72009-05-11 13:35:50 +03002000 kvm_queue_interrupt(vcpu, irq->irq, false);
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002001
2002 vcpu_put(vcpu);
2003
2004 return 0;
2005}
2006
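/*
 * KVM_INTERRUPT, served by kvm_vcpu_ioctl_interrupt() above, is only valid
 * when the irqchip is emulated in userspace; with an in-kernel PIC/IOAPIC it
 * fails with -ENXIO and KVM_IRQ_LINE on the VM fd is used instead.  Sketch
 * of the userspace-irqchip case (vcpu_fd and vector are placeholders):
 *
 *	struct kvm_interrupt intr = { .irq = vector };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
 */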
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002007static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2008{
2009 vcpu_load(vcpu);
2010 kvm_inject_nmi(vcpu);
2011 vcpu_put(vcpu);
2012
2013 return 0;
2014}
2015
Avi Kivityb209749f2007-10-22 16:50:39 +02002016static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2017 struct kvm_tpr_access_ctl *tac)
2018{
2019 if (tac->flags)
2020 return -EINVAL;
2021 vcpu->arch.tpr_access_reporting = !!tac->enabled;
2022 return 0;
2023}
2024
Huang Ying890ca9a2009-05-11 16:48:15 +08002025static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2026 u64 mcg_cap)
2027{
2028 int r;
2029 unsigned bank_num = mcg_cap & 0xff, bank;
2030
2031 r = -EINVAL;
Jan Kiszkaa9e38c3e2009-10-23 09:37:00 +02002032 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
Huang Ying890ca9a2009-05-11 16:48:15 +08002033 goto out;
2034 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2035 goto out;
2036 r = 0;
2037 vcpu->arch.mcg_cap = mcg_cap;
2038 /* Init IA32_MCG_CTL to all 1s */
2039 if (mcg_cap & MCG_CTL_P)
2040 vcpu->arch.mcg_ctl = ~(u64)0;
2041 /* Init IA32_MCi_CTL to all 1s */
2042 for (bank = 0; bank < bank_num; bank++)
2043 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2044out:
2045 return r;
2046}
2047
2048static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2049 struct kvm_x86_mce *mce)
2050{
2051 u64 mcg_cap = vcpu->arch.mcg_cap;
2052 unsigned bank_num = mcg_cap & 0xff;
2053 u64 *banks = vcpu->arch.mce_banks;
2054
2055 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2056 return -EINVAL;
2057 /*
2058 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2059 * reporting is disabled
2060 */
2061 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2062 vcpu->arch.mcg_ctl != ~(u64)0)
2063 return 0;
2064 banks += 4 * mce->bank;
2065 /*
2066 * if IA32_MCi_CTL is not all 1s, the uncorrected error
2067 * reporting is disabled for the bank
2068 */
2069 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2070 return 0;
2071 if (mce->status & MCI_STATUS_UC) {
2072 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
Avi Kivityfc78f512009-12-07 12:16:48 +02002073 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
Huang Ying890ca9a2009-05-11 16:48:15 +08002074 printk(KERN_DEBUG "kvm: set_mce: "
2075 "injects mce exception while "
2076 "previous one is in progress!\n");
2077 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
2078 return 0;
2079 }
2080 if (banks[1] & MCI_STATUS_VAL)
2081 mce->status |= MCI_STATUS_OVER;
2082 banks[2] = mce->addr;
2083 banks[3] = mce->misc;
2084 vcpu->arch.mcg_status = mce->mcg_status;
2085 banks[1] = mce->status;
2086 kvm_queue_exception(vcpu, MC_VECTOR);
2087 } else if (!(banks[1] & MCI_STATUS_VAL)
2088 || !(banks[1] & MCI_STATUS_UC)) {
2089 if (banks[1] & MCI_STATUS_VAL)
2090 mce->status |= MCI_STATUS_OVER;
2091 banks[2] = mce->addr;
2092 banks[3] = mce->misc;
2093 banks[1] = mce->status;
2094 } else
2095 banks[1] |= MCI_STATUS_OVER;
2096 return 0;
2097}
2098
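/*
 * Rough injection sequence from userspace (a sketch only; kvm_fd, vcpu_fd
 * and paddr are placeholders, and the status constants come from
 * <asm/mce.h>): machine checks are armed once with KVM_X86_SETUP_MCE and
 * individual events are then delivered with KVM_X86_SET_MCE, which lands in
 * the handler above.
 *
 *	__u64 mcg_cap;
 *
 *	ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap);
 *	mcg_cap = (mcg_cap & ~0xffULL) | 10;	  (keep flags, ask for 10 banks)
 *	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
 *
 *	struct kvm_x86_mce mce = {
 *		.status     = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN,
 *		.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
 *		.addr       = paddr,
 *		.bank       = 9,
 *	};
 *	ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
 */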
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002099static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2100 struct kvm_vcpu_events *events)
2101{
2102 vcpu_load(vcpu);
2103
2104 events->exception.injected = vcpu->arch.exception.pending;
2105 events->exception.nr = vcpu->arch.exception.nr;
2106 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2107 events->exception.error_code = vcpu->arch.exception.error_code;
2108
2109 events->interrupt.injected = vcpu->arch.interrupt.pending;
2110 events->interrupt.nr = vcpu->arch.interrupt.nr;
2111 events->interrupt.soft = vcpu->arch.interrupt.soft;
2112
2113 events->nmi.injected = vcpu->arch.nmi_injected;
2114 events->nmi.pending = vcpu->arch.nmi_pending;
2115 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2116
2117 events->sipi_vector = vcpu->arch.sipi_vector;
2118
Jan Kiszkadab4b912009-12-06 18:24:15 +01002119 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2120 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002121
2122 vcpu_put(vcpu);
2123}
2124
2125static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2126 struct kvm_vcpu_events *events)
2127{
Jan Kiszkadab4b912009-12-06 18:24:15 +01002128 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2129 | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002130 return -EINVAL;
2131
2132 vcpu_load(vcpu);
2133
2134 vcpu->arch.exception.pending = events->exception.injected;
2135 vcpu->arch.exception.nr = events->exception.nr;
2136 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2137 vcpu->arch.exception.error_code = events->exception.error_code;
2138
2139 vcpu->arch.interrupt.pending = events->interrupt.injected;
2140 vcpu->arch.interrupt.nr = events->interrupt.nr;
2141 vcpu->arch.interrupt.soft = events->interrupt.soft;
2142 if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
2143 kvm_pic_clear_isr_ack(vcpu->kvm);
2144
2145 vcpu->arch.nmi_injected = events->nmi.injected;
Jan Kiszkadab4b912009-12-06 18:24:15 +01002146 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2147 vcpu->arch.nmi_pending = events->nmi.pending;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002148 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2149
Jan Kiszkadab4b912009-12-06 18:24:15 +01002150 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2151 vcpu->arch.sipi_vector = events->sipi_vector;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002152
2153 vcpu_put(vcpu);
2154
2155 return 0;
2156}
2157
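/*
 * Note on the flags handling above: KVM_GET_VCPU_EVENTS always reports
 * nmi.pending and sipi_vector, but KVM_SET_VCPU_EVENTS only applies them
 * when the matching KVM_VCPUEVENT_VALID_* bit is set, so a caller that
 * clears those bits cannot clobber pending-NMI or SIPI state it did not
 * mean to touch.  A save/restore round trip is simply (sketch):
 *
 *	struct kvm_vcpu_events ev;
 *
 *	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev);
 *	(... migrate or reset the vcpu ...)
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
 */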
Carsten Otte313a3dc2007-10-11 19:16:52 +02002158long kvm_arch_vcpu_ioctl(struct file *filp,
2159 unsigned int ioctl, unsigned long arg)
2160{
2161 struct kvm_vcpu *vcpu = filp->private_data;
2162 void __user *argp = (void __user *)arg;
2163 int r;
Dave Hansenb772ff32008-08-11 10:01:47 -07002164 struct kvm_lapic_state *lapic = NULL;
Carsten Otte313a3dc2007-10-11 19:16:52 +02002165
2166 switch (ioctl) {
2167 case KVM_GET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002168 r = -EINVAL;
2169 if (!vcpu->arch.apic)
2170 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002171 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002172
Dave Hansenb772ff32008-08-11 10:01:47 -07002173 r = -ENOMEM;
2174 if (!lapic)
2175 goto out;
2176 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002177 if (r)
2178 goto out;
2179 r = -EFAULT;
Dave Hansenb772ff32008-08-11 10:01:47 -07002180 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02002181 goto out;
2182 r = 0;
2183 break;
2184 }
2185 case KVM_SET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002186 r = -EINVAL;
2187 if (!vcpu->arch.apic)
2188 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002189 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2190 r = -ENOMEM;
2191 if (!lapic)
Carsten Otte313a3dc2007-10-11 19:16:52 +02002192 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002193 r = -EFAULT;
2194 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
2195 goto out;
2196 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002197 if (r)
2198 goto out;
2199 r = 0;
2200 break;
2201 }
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002202 case KVM_INTERRUPT: {
2203 struct kvm_interrupt irq;
2204
2205 r = -EFAULT;
2206 if (copy_from_user(&irq, argp, sizeof irq))
2207 goto out;
2208 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2209 if (r)
2210 goto out;
2211 r = 0;
2212 break;
2213 }
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002214 case KVM_NMI: {
2215 r = kvm_vcpu_ioctl_nmi(vcpu);
2216 if (r)
2217 goto out;
2218 r = 0;
2219 break;
2220 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002221 case KVM_SET_CPUID: {
2222 struct kvm_cpuid __user *cpuid_arg = argp;
2223 struct kvm_cpuid cpuid;
2224
2225 r = -EFAULT;
2226 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2227 goto out;
2228 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2229 if (r)
2230 goto out;
2231 break;
2232 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02002233 case KVM_SET_CPUID2: {
2234 struct kvm_cpuid2 __user *cpuid_arg = argp;
2235 struct kvm_cpuid2 cpuid;
2236
2237 r = -EFAULT;
2238 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2239 goto out;
2240 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002241 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002242 if (r)
2243 goto out;
2244 break;
2245 }
2246 case KVM_GET_CPUID2: {
2247 struct kvm_cpuid2 __user *cpuid_arg = argp;
2248 struct kvm_cpuid2 cpuid;
2249
2250 r = -EFAULT;
2251 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2252 goto out;
2253 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002254 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002255 if (r)
2256 goto out;
2257 r = -EFAULT;
2258 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2259 goto out;
2260 r = 0;
2261 break;
2262 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002263 case KVM_GET_MSRS:
2264 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2265 break;
2266 case KVM_SET_MSRS:
2267 r = msr_io(vcpu, argp, do_set_msr, 0);
2268 break;
Avi Kivityb209749f2007-10-22 16:50:39 +02002269 case KVM_TPR_ACCESS_REPORTING: {
2270 struct kvm_tpr_access_ctl tac;
2271
2272 r = -EFAULT;
2273 if (copy_from_user(&tac, argp, sizeof tac))
2274 goto out;
2275 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2276 if (r)
2277 goto out;
2278 r = -EFAULT;
2279 if (copy_to_user(argp, &tac, sizeof tac))
2280 goto out;
2281 r = 0;
2282 break;
2283 };
Avi Kivityb93463a2007-10-25 16:52:32 +02002284 case KVM_SET_VAPIC_ADDR: {
2285 struct kvm_vapic_addr va;
2286
2287 r = -EINVAL;
2288 if (!irqchip_in_kernel(vcpu->kvm))
2289 goto out;
2290 r = -EFAULT;
2291 if (copy_from_user(&va, argp, sizeof va))
2292 goto out;
2293 r = 0;
2294 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2295 break;
2296 }
Huang Ying890ca9a2009-05-11 16:48:15 +08002297 case KVM_X86_SETUP_MCE: {
2298 u64 mcg_cap;
2299
2300 r = -EFAULT;
2301 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2302 goto out;
2303 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2304 break;
2305 }
2306 case KVM_X86_SET_MCE: {
2307 struct kvm_x86_mce mce;
2308
2309 r = -EFAULT;
2310 if (copy_from_user(&mce, argp, sizeof mce))
2311 goto out;
2312 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2313 break;
2314 }
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002315 case KVM_GET_VCPU_EVENTS: {
2316 struct kvm_vcpu_events events;
2317
2318 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2319
2320 r = -EFAULT;
2321 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2322 break;
2323 r = 0;
2324 break;
2325 }
2326 case KVM_SET_VCPU_EVENTS: {
2327 struct kvm_vcpu_events events;
2328
2329 r = -EFAULT;
2330 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2331 break;
2332
2333 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2334 break;
2335 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002336 default:
2337 r = -EINVAL;
2338 }
2339out:
Wei Yongjun7a6ce842009-03-31 16:47:44 +08002340 kfree(lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002341 return r;
2342}
2343
Carsten Otte1fe779f2007-10-29 16:08:35 +01002344static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2345{
2346 int ret;
2347
2348 if (addr > (unsigned int)(-3 * PAGE_SIZE))
2349 return -1;
2350 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2351 return ret;
2352}
2353
Sheng Yangb927a3c2009-07-21 10:42:48 +08002354static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2355 u64 ident_addr)
2356{
2357 kvm->arch.ept_identity_map_addr = ident_addr;
2358 return 0;
2359}
2360
Carsten Otte1fe779f2007-10-29 16:08:35 +01002361static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2362 u32 kvm_nr_mmu_pages)
2363{
2364 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2365 return -EINVAL;
2366
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002367 mutex_lock(&kvm->slots_lock);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002368 spin_lock(&kvm->mmu_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002369
2370 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002371 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002372
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002373 spin_unlock(&kvm->mmu_lock);
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002374 mutex_unlock(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002375 return 0;
2376}
2377
2378static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2379{
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002380 return kvm->arch.n_alloc_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002381}
2382
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002383gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
2384{
2385 int i;
2386 struct kvm_mem_alias *alias;
2387 struct kvm_mem_aliases *aliases;
2388
2389 aliases = rcu_dereference(kvm->arch.aliases);
2390
2391 for (i = 0; i < aliases->naliases; ++i) {
2392 alias = &aliases->aliases[i];
2393 if (alias->flags & KVM_ALIAS_INVALID)
2394 continue;
2395 if (gfn >= alias->base_gfn
2396 && gfn < alias->base_gfn + alias->npages)
2397 return alias->target_gfn + gfn - alias->base_gfn;
2398 }
2399 return gfn;
2400}
2401
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002402gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
2403{
2404 int i;
2405 struct kvm_mem_alias *alias;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002406 struct kvm_mem_aliases *aliases;
2407
2408 aliases = rcu_dereference(kvm->arch.aliases);
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002409
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002410 for (i = 0; i < aliases->naliases; ++i) {
2411 alias = &aliases->aliases[i];
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002412 if (gfn >= alias->base_gfn
2413 && gfn < alias->base_gfn + alias->npages)
2414 return alias->target_gfn + gfn - alias->base_gfn;
2415 }
2416 return gfn;
2417}
2418
Carsten Otte1fe779f2007-10-29 16:08:35 +01002419/*
2420 * Set a new alias region. Aliases map a portion of physical memory into
2421 * another portion. This is useful for memory windows, for example the PC
2422 * VGA region.
2423 */
2424static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
2425 struct kvm_memory_alias *alias)
2426{
2427 int r, n;
2428 struct kvm_mem_alias *p;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002429 struct kvm_mem_aliases *aliases, *old_aliases;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002430
2431 r = -EINVAL;
2432 /* General sanity checks */
2433 if (alias->memory_size & (PAGE_SIZE - 1))
2434 goto out;
2435 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
2436 goto out;
2437 if (alias->slot >= KVM_ALIAS_SLOTS)
2438 goto out;
2439 if (alias->guest_phys_addr + alias->memory_size
2440 < alias->guest_phys_addr)
2441 goto out;
2442 if (alias->target_phys_addr + alias->memory_size
2443 < alias->target_phys_addr)
2444 goto out;
2445
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002446 r = -ENOMEM;
2447 aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2448 if (!aliases)
2449 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002450
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002451 mutex_lock(&kvm->slots_lock);
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002452
2453 /* invalidate any gfn reference in case of deletion/shrinking */
2454 memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
2455 aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
2456 old_aliases = kvm->arch.aliases;
2457 rcu_assign_pointer(kvm->arch.aliases, aliases);
2458 synchronize_srcu_expedited(&kvm->srcu);
2459 kvm_mmu_zap_all(kvm);
2460 kfree(old_aliases);
2461
2462 r = -ENOMEM;
2463 aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2464 if (!aliases)
2465 goto out_unlock;
2466
2467 memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002468
2469 p = &aliases->aliases[alias->slot];
Carsten Otte1fe779f2007-10-29 16:08:35 +01002470 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2471 p->npages = alias->memory_size >> PAGE_SHIFT;
2472 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002473 p->flags &= ~(KVM_ALIAS_INVALID);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002474
2475 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002476 if (aliases->aliases[n - 1].npages)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002477 break;
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002478 aliases->naliases = n;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002479
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002480 old_aliases = kvm->arch.aliases;
2481 rcu_assign_pointer(kvm->arch.aliases, aliases);
2482 synchronize_srcu_expedited(&kvm->srcu);
2483 kfree(old_aliases);
2484 r = 0;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002485
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002486out_unlock:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002487 mutex_unlock(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002488out:
2489 return r;
2490}
2491
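/*
 * The double publish in kvm_vm_ioctl_set_memory_alias() is intentional: the
 * first copy only marks the slot KVM_ALIAS_INVALID and is made visible with
 * rcu_assign_pointer() + synchronize_srcu_expedited(), so that
 * unalias_gfn_instantiation() stops handing out translations through the old
 * alias before kvm_mmu_zap_all() runs; the second copy then installs the new
 * alias with the INVALID flag cleared and recomputes naliases.
 */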
2492static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2493{
2494 int r;
2495
2496 r = 0;
2497 switch (chip->chip_id) {
2498 case KVM_IRQCHIP_PIC_MASTER:
2499 memcpy(&chip->chip.pic,
2500 &pic_irqchip(kvm)->pics[0],
2501 sizeof(struct kvm_pic_state));
2502 break;
2503 case KVM_IRQCHIP_PIC_SLAVE:
2504 memcpy(&chip->chip.pic,
2505 &pic_irqchip(kvm)->pics[1],
2506 sizeof(struct kvm_pic_state));
2507 break;
2508 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002509 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002510 break;
2511 default:
2512 r = -EINVAL;
2513 break;
2514 }
2515 return r;
2516}
2517
2518static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2519{
2520 int r;
2521
2522 r = 0;
2523 switch (chip->chip_id) {
2524 case KVM_IRQCHIP_PIC_MASTER:
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002525 raw_spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002526 memcpy(&pic_irqchip(kvm)->pics[0],
2527 &chip->chip.pic,
2528 sizeof(struct kvm_pic_state));
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002529 raw_spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002530 break;
2531 case KVM_IRQCHIP_PIC_SLAVE:
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002532 raw_spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002533 memcpy(&pic_irqchip(kvm)->pics[1],
2534 &chip->chip.pic,
2535 sizeof(struct kvm_pic_state));
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002536 raw_spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002537 break;
2538 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002539 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002540 break;
2541 default:
2542 r = -EINVAL;
2543 break;
2544 }
2545 kvm_pic_update_irq(pic_irqchip(kvm));
2546 return r;
2547}
2548
Sheng Yange0f63cb2008-03-04 00:50:59 +08002549static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2550{
2551 int r = 0;
2552
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002553 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002554 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002555 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002556 return r;
2557}
2558
2559static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2560{
2561 int r = 0;
2562
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002563 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002564 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
Beth Kone9f42752009-07-07 11:50:38 -04002565 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2566 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2567 return r;
2568}
2569
2570static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2571{
2572 int r = 0;
2573
2574 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2575 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2576 sizeof(ps->channels));
2577 ps->flags = kvm->arch.vpit->pit_state.flags;
2578 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2579 return r;
2580}
2581
2582static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2583{
2584 int r = 0, start = 0;
2585 u32 prev_legacy, cur_legacy;
2586 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2587 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2588 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2589 if (!prev_legacy && cur_legacy)
2590 start = 1;
2591 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2592 sizeof(kvm->arch.vpit->pit_state.channels));
2593 kvm->arch.vpit->pit_state.flags = ps->flags;
2594 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002595 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002596 return r;
2597}
2598
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002599static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2600 struct kvm_reinject_control *control)
2601{
2602 if (!kvm->arch.vpit)
2603 return -ENXIO;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002604 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002605 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002606 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002607 return 0;
2608}
2609
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002610/*
2611 * Get (and clear) the dirty memory log for a memory slot.
2612 */
2613int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2614 struct kvm_dirty_log *log)
2615{
Takuya Yoshikawa87bf6e72010-04-12 19:35:35 +09002616 int r, i;
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002617 struct kvm_memory_slot *memslot;
Takuya Yoshikawa87bf6e72010-04-12 19:35:35 +09002618 unsigned long n;
Marcelo Tosattib050b012009-12-23 14:35:22 -02002619 unsigned long is_dirty = 0;
2620 unsigned long *dirty_bitmap = NULL;
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002621
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002622 mutex_lock(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002623
Marcelo Tosattib050b012009-12-23 14:35:22 -02002624 r = -EINVAL;
2625 if (log->slot >= KVM_MEMORY_SLOTS)
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002626 goto out;
2627
Marcelo Tosattib050b012009-12-23 14:35:22 -02002628 memslot = &kvm->memslots->memslots[log->slot];
2629 r = -ENOENT;
2630 if (!memslot->dirty_bitmap)
2631 goto out;
2632
Takuya Yoshikawa87bf6e72010-04-12 19:35:35 +09002633 n = kvm_dirty_bitmap_bytes(memslot);
Marcelo Tosattib050b012009-12-23 14:35:22 -02002634
2635 r = -ENOMEM;
2636 dirty_bitmap = vmalloc(n);
2637 if (!dirty_bitmap)
2638 goto out;
2639 memset(dirty_bitmap, 0, n);
2640
2641 for (i = 0; !is_dirty && i < n/sizeof(long); i++)
2642 is_dirty = memslot->dirty_bitmap[i];
2643
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002644 /* If nothing is dirty, don't bother messing with page tables. */
2645 if (is_dirty) {
Marcelo Tosattib050b012009-12-23 14:35:22 -02002646 struct kvm_memslots *slots, *old_slots;
2647
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002648 spin_lock(&kvm->mmu_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002649 kvm_mmu_slot_remove_write_access(kvm, log->slot);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002650 spin_unlock(&kvm->mmu_lock);
Marcelo Tosattib050b012009-12-23 14:35:22 -02002651
2652 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
2653 if (!slots)
2654 goto out_free;
2655
2656 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
2657 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
2658
2659 old_slots = kvm->memslots;
2660 rcu_assign_pointer(kvm->memslots, slots);
2661 synchronize_srcu_expedited(&kvm->srcu);
2662 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
2663 kfree(old_slots);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002664 }
Marcelo Tosattib050b012009-12-23 14:35:22 -02002665
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002666 r = 0;
Marcelo Tosattib050b012009-12-23 14:35:22 -02002667 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
2668 r = -EFAULT;
2669out_free:
2670 vfree(dirty_bitmap);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002671out:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002672 mutex_unlock(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002673 return r;
2674}
2675
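/*
 * Rough userspace counterpart of KVM_GET_DIRTY_LOG (sketch; vm_fd, slot_id
 * and npages are placeholders, and the buffer must be at least
 * kvm_dirty_bitmap_bytes() large, i.e. one bit per page rounded up to a
 * whole long):
 *
 *	void *bitmap = calloc((npages + 63) / 64, sizeof(__u64));
 *	struct kvm_dirty_log log = {
 *		.slot         = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *		(walk bitmap and resend the dirty pages)
 *
 * Each call also swaps a fresh, zeroed bitmap into a new memslots copy via
 * rcu_assign_pointer() above; the retired bitmap is what reaches userspace.
 */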
Carsten Otte1fe779f2007-10-29 16:08:35 +01002676long kvm_arch_vm_ioctl(struct file *filp,
2677 unsigned int ioctl, unsigned long arg)
2678{
2679 struct kvm *kvm = filp->private_data;
2680 void __user *argp = (void __user *)arg;
Avi Kivity367e1312009-08-26 14:57:07 +03002681 int r = -ENOTTY;
Dave Hansenf0d66272008-08-11 10:01:45 -07002682 /*
2683 * This union makes it completely explicit to gcc-3.x
2684 * that these two variables' stack usage should be
2685 * combined, not added together.
2686 */
2687 union {
2688 struct kvm_pit_state ps;
Beth Kone9f42752009-07-07 11:50:38 -04002689 struct kvm_pit_state2 ps2;
Dave Hansenf0d66272008-08-11 10:01:45 -07002690 struct kvm_memory_alias alias;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002691 struct kvm_pit_config pit_config;
Dave Hansenf0d66272008-08-11 10:01:45 -07002692 } u;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002693
2694 switch (ioctl) {
2695 case KVM_SET_TSS_ADDR:
2696 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2697 if (r < 0)
2698 goto out;
2699 break;
Sheng Yangb927a3c2009-07-21 10:42:48 +08002700 case KVM_SET_IDENTITY_MAP_ADDR: {
2701 u64 ident_addr;
2702
2703 r = -EFAULT;
2704 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2705 goto out;
2706 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2707 if (r < 0)
2708 goto out;
2709 break;
2710 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01002711 case KVM_SET_MEMORY_REGION: {
2712 struct kvm_memory_region kvm_mem;
2713 struct kvm_userspace_memory_region kvm_userspace_mem;
2714
2715 r = -EFAULT;
2716 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2717 goto out;
2718 kvm_userspace_mem.slot = kvm_mem.slot;
2719 kvm_userspace_mem.flags = kvm_mem.flags;
2720 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2721 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2722 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2723 if (r)
2724 goto out;
2725 break;
2726 }
2727 case KVM_SET_NR_MMU_PAGES:
2728 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2729 if (r)
2730 goto out;
2731 break;
2732 case KVM_GET_NR_MMU_PAGES:
2733 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2734 break;
Dave Hansenf0d66272008-08-11 10:01:45 -07002735 case KVM_SET_MEMORY_ALIAS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002736 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002737 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
Carsten Otte1fe779f2007-10-29 16:08:35 +01002738 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002739 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002740 if (r)
2741 goto out;
2742 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002743 case KVM_CREATE_IRQCHIP: {
2744 struct kvm_pic *vpic;
2745
2746 mutex_lock(&kvm->lock);
2747 r = -EEXIST;
2748 if (kvm->arch.vpic)
2749 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002750 r = -ENOMEM;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002751 vpic = kvm_create_pic(kvm);
2752 if (vpic) {
Carsten Otte1fe779f2007-10-29 16:08:35 +01002753 r = kvm_ioapic_init(kvm);
2754 if (r) {
Wei Yongjun72bb2fc2010-02-09 10:33:03 +08002755 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
2756 &vpic->dev);
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002757 kfree(vpic);
2758 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002759 }
2760 } else
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002761 goto create_irqchip_unlock;
2762 smp_wmb();
2763 kvm->arch.vpic = vpic;
2764 smp_wmb();
Avi Kivity399ec802008-11-19 13:58:46 +02002765 r = kvm_setup_default_irq_routing(kvm);
2766 if (r) {
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002767 mutex_lock(&kvm->irq_lock);
Wei Yongjun72bb2fc2010-02-09 10:33:03 +08002768 kvm_ioapic_destroy(kvm);
2769 kvm_destroy_pic(kvm);
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002770 mutex_unlock(&kvm->irq_lock);
Avi Kivity399ec802008-11-19 13:58:46 +02002771 }
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002772 create_irqchip_unlock:
2773 mutex_unlock(&kvm->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002774 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002775 }
Sheng Yang78376992008-01-28 05:10:22 +08002776 case KVM_CREATE_PIT:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002777 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2778 goto create_pit;
2779 case KVM_CREATE_PIT2:
2780 r = -EFAULT;
2781 if (copy_from_user(&u.pit_config, argp,
2782 sizeof(struct kvm_pit_config)))
2783 goto out;
2784 create_pit:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002785 mutex_lock(&kvm->slots_lock);
Avi Kivity269e05e2009-01-05 15:21:42 +02002786 r = -EEXIST;
2787 if (kvm->arch.vpit)
2788 goto create_pit_unlock;
Sheng Yang78376992008-01-28 05:10:22 +08002789 r = -ENOMEM;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002790 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
Sheng Yang78376992008-01-28 05:10:22 +08002791 if (kvm->arch.vpit)
2792 r = 0;
Avi Kivity269e05e2009-01-05 15:21:42 +02002793 create_pit_unlock:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002794 mutex_unlock(&kvm->slots_lock);
Sheng Yang78376992008-01-28 05:10:22 +08002795 break;
Gleb Natapov49256632009-02-04 17:28:14 +02002796 case KVM_IRQ_LINE_STATUS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002797 case KVM_IRQ_LINE: {
2798 struct kvm_irq_level irq_event;
2799
2800 r = -EFAULT;
2801 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2802 goto out;
2803 if (irqchip_in_kernel(kvm)) {
Gleb Natapov49256632009-02-04 17:28:14 +02002804 __s32 status;
Gleb Natapov49256632009-02-04 17:28:14 +02002805 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2806 irq_event.irq, irq_event.level);
Gleb Natapov49256632009-02-04 17:28:14 +02002807 if (ioctl == KVM_IRQ_LINE_STATUS) {
2808 irq_event.status = status;
2809 if (copy_to_user(argp, &irq_event,
2810 sizeof irq_event))
2811 goto out;
2812 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01002813 r = 0;
2814 }
2815 break;
2816 }
2817 case KVM_GET_IRQCHIP: {
2818 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07002819 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002820
Dave Hansenf0d66272008-08-11 10:01:45 -07002821 r = -ENOMEM;
2822 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002823 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002824 r = -EFAULT;
2825 if (copy_from_user(chip, argp, sizeof *chip))
2826 goto get_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002827 r = -ENXIO;
2828 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07002829 goto get_irqchip_out;
2830 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
2831 if (r)
2832 goto get_irqchip_out;
2833 r = -EFAULT;
2834 if (copy_to_user(argp, chip, sizeof *chip))
2835 goto get_irqchip_out;
2836 r = 0;
2837 get_irqchip_out:
2838 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002839 if (r)
2840 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002841 break;
2842 }
2843 case KVM_SET_IRQCHIP: {
2844 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07002845 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002846
Dave Hansenf0d66272008-08-11 10:01:45 -07002847 r = -ENOMEM;
2848 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002849 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002850 r = -EFAULT;
2851 if (copy_from_user(chip, argp, sizeof *chip))
2852 goto set_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002853 r = -ENXIO;
2854 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07002855 goto set_irqchip_out;
2856 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
2857 if (r)
2858 goto set_irqchip_out;
2859 r = 0;
2860 set_irqchip_out:
2861 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002862 if (r)
2863 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002864 break;
2865 }
Sheng Yange0f63cb2008-03-04 00:50:59 +08002866 case KVM_GET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08002867 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002868 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002869 goto out;
2870 r = -ENXIO;
2871 if (!kvm->arch.vpit)
2872 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002873 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002874 if (r)
2875 goto out;
2876 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002877 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002878 goto out;
2879 r = 0;
2880 break;
2881 }
2882 case KVM_SET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08002883 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002884 if (copy_from_user(&u.ps, argp, sizeof u.ps))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002885 goto out;
2886 r = -ENXIO;
2887 if (!kvm->arch.vpit)
2888 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002889 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002890 if (r)
2891 goto out;
2892 r = 0;
2893 break;
2894 }
Beth Kone9f42752009-07-07 11:50:38 -04002895 case KVM_GET_PIT2: {
2896 r = -ENXIO;
2897 if (!kvm->arch.vpit)
2898 goto out;
2899 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
2900 if (r)
2901 goto out;
2902 r = -EFAULT;
2903 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
2904 goto out;
2905 r = 0;
2906 break;
2907 }
2908 case KVM_SET_PIT2: {
2909 r = -EFAULT;
2910 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
2911 goto out;
2912 r = -ENXIO;
2913 if (!kvm->arch.vpit)
2914 goto out;
2915 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
2916 if (r)
2917 goto out;
2918 r = 0;
2919 break;
2920 }
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002921 case KVM_REINJECT_CONTROL: {
2922 struct kvm_reinject_control control;
2923 r = -EFAULT;
2924 if (copy_from_user(&control, argp, sizeof(control)))
2925 goto out;
2926 r = kvm_vm_ioctl_reinject(kvm, &control);
2927 if (r)
2928 goto out;
2929 r = 0;
2930 break;
2931 }
Ed Swierkffde22a2009-10-15 15:21:43 -07002932 case KVM_XEN_HVM_CONFIG: {
2933 r = -EFAULT;
2934 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
2935 sizeof(struct kvm_xen_hvm_config)))
2936 goto out;
2937 r = -EINVAL;
2938 if (kvm->arch.xen_hvm_config.flags)
2939 goto out;
2940 r = 0;
2941 break;
2942 }
Glauber Costaafbcf7a2009-10-16 15:28:36 -04002943 case KVM_SET_CLOCK: {
2944 struct timespec now;
2945 struct kvm_clock_data user_ns;
2946 u64 now_ns;
2947 s64 delta;
2948
2949 r = -EFAULT;
2950 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
2951 goto out;
2952
2953 r = -EINVAL;
2954 if (user_ns.flags)
2955 goto out;
2956
2957 r = 0;
2958 ktime_get_ts(&now);
2959 now_ns = timespec_to_ns(&now);
2960 delta = user_ns.clock - now_ns;
2961 kvm->arch.kvmclock_offset = delta;
2962 break;
2963 }
2964 case KVM_GET_CLOCK: {
2965 struct timespec now;
2966 struct kvm_clock_data user_ns;
2967 u64 now_ns;
2968
2969 ktime_get_ts(&now);
2970 now_ns = timespec_to_ns(&now);
2971 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
2972 user_ns.flags = 0;
2973
2974 r = -EFAULT;
2975 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
2976 goto out;
2977 r = 0;
2978 break;
2979 }
2980
Carsten Otte1fe779f2007-10-29 16:08:35 +01002981 default:
2982 ;
2983 }
2984out:
2985 return r;
2986}
2987
Zhang Xiantaoa16b0432007-11-16 14:38:21 +08002988static void kvm_init_msr_list(void)
Carsten Otte043405e2007-10-10 17:16:19 +02002989{
2990 u32 dummy[2];
2991 unsigned i, j;
2992
Glauber Costae3267cb2009-10-06 13:24:50 -04002993	/* Skip the first MSRs in the list; they are KVM-specific. */
2994 for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
Carsten Otte043405e2007-10-10 17:16:19 +02002995 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2996 continue;
2997 if (j < i)
2998 msrs_to_save[j] = msrs_to_save[i];
2999 j++;
3000 }
3001 num_msrs_to_save = j;
3002}
3003
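/*
 * MMIO helpers: try the in-kernel local APIC first; if it does not claim
 * the access, fall through to the devices registered on KVM_MMIO_BUS.
 */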
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003004static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3005 const void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003006{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003007 if (vcpu->arch.apic &&
3008 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
3009 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003010
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003011 return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003012}
3013
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003014static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003015{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003016 if (vcpu->arch.apic &&
3017 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
3018 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003019
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003020 return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003021}
3022
Gleb Natapov1871c602010-02-10 14:21:32 +02003023gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3024{
3025 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3026 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3027}
3028
3029gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3030{
3031 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3032 access |= PFERR_FETCH_MASK;
3033 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3034}
3035
3036gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3037{
3038 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3039 access |= PFERR_WRITE_MASK;
3040 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3041}
3042
3043/* used to access any guest's mapped memory without checking CPL */
3044gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3045{
3046 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
3047}
3048
3049static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3050 struct kvm_vcpu *vcpu, u32 access,
3051 u32 *error)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003052{
3053 void *data = val;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003054 int r = X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003055
3056 while (bytes) {
Gleb Natapov1871c602010-02-10 14:21:32 +02003057 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003058 unsigned offset = addr & (PAGE_SIZE-1);
Izik Eidus77c20022008-12-29 01:42:19 +02003059 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003060 int ret;
3061
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003062 if (gpa == UNMAPPED_GVA) {
3063 r = X86EMUL_PROPAGATE_FAULT;
3064 goto out;
3065 }
Izik Eidus77c20022008-12-29 01:42:19 +02003066 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003067 if (ret < 0) {
3068 r = X86EMUL_UNHANDLEABLE;
3069 goto out;
3070 }
Carsten Ottebbd9b642007-10-30 18:44:21 +01003071
Izik Eidus77c20022008-12-29 01:42:19 +02003072 bytes -= toread;
3073 data += toread;
3074 addr += toread;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003075 }
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003076out:
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003077 return r;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003078}
Izik Eidus77c20022008-12-29 01:42:19 +02003079
Gleb Natapov1871c602010-02-10 14:21:32 +02003080/* used for instruction fetching */
3081static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3082 struct kvm_vcpu *vcpu, u32 *error)
3083{
3084 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3085 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3086 access | PFERR_FETCH_MASK, error);
3087}
3088
3089static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3090 struct kvm_vcpu *vcpu, u32 *error)
3091{
3092 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3093 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3094 error);
3095}
3096
3097static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3098 struct kvm_vcpu *vcpu, u32 *error)
3099{
3100 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3101}
3102
Hannes Edercded19f2009-02-21 02:19:13 +01003103static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
Gleb Natapov1871c602010-02-10 14:21:32 +02003104 struct kvm_vcpu *vcpu, u32 *error)
Izik Eidus77c20022008-12-29 01:42:19 +02003105{
3106 void *data = val;
3107 int r = X86EMUL_CONTINUE;
3108
3109 while (bytes) {
Gleb Natapov1871c602010-02-10 14:21:32 +02003110 gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
Izik Eidus77c20022008-12-29 01:42:19 +02003111 unsigned offset = addr & (PAGE_SIZE-1);
3112 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3113 int ret;
3114
3115 if (gpa == UNMAPPED_GVA) {
3116 r = X86EMUL_PROPAGATE_FAULT;
3117 goto out;
3118 }
3119 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3120 if (ret < 0) {
3121 r = X86EMUL_UNHANDLEABLE;
3122 goto out;
3123 }
3124
3125 bytes -= towrite;
3126 data += towrite;
3127 addr += towrite;
3128 }
3129out:
3130 return r;
3131}
3132
Carsten Ottebbd9b642007-10-30 18:44:21 +01003133
Carsten Ottebbd9b642007-10-30 18:44:21 +01003134static int emulator_read_emulated(unsigned long addr,
3135 void *val,
3136 unsigned int bytes,
3137 struct kvm_vcpu *vcpu)
3138{
Carsten Ottebbd9b642007-10-30 18:44:21 +01003139 gpa_t gpa;
Gleb Natapov1871c602010-02-10 14:21:32 +02003140 u32 error_code;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003141
3142 if (vcpu->mmio_read_completed) {
3143 memcpy(val, vcpu->mmio_data, bytes);
Avi Kivityaec51dc2009-07-01 16:01:02 +03003144 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3145 vcpu->mmio_phys_addr, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003146 vcpu->mmio_read_completed = 0;
3147 return X86EMUL_CONTINUE;
3148 }
3149
Gleb Natapov1871c602010-02-10 14:21:32 +02003150 gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
3151
3152 if (gpa == UNMAPPED_GVA) {
3153 kvm_inject_page_fault(vcpu, addr, error_code);
3154 return X86EMUL_PROPAGATE_FAULT;
3155 }
Carsten Ottebbd9b642007-10-30 18:44:21 +01003156
3157 /* For APIC access vmexit */
3158 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3159 goto mmio;
3160
Gleb Natapov1871c602010-02-10 14:21:32 +02003161 if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
Izik Eidus77c20022008-12-29 01:42:19 +02003162 == X86EMUL_CONTINUE)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003163 return X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003164
3165mmio:
3166 /*
3167 * Is this MMIO handled locally?
3168 */
Avi Kivityaec51dc2009-07-01 16:01:02 +03003169 if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
3170 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003171 return X86EMUL_CONTINUE;
3172 }
Avi Kivityaec51dc2009-07-01 16:01:02 +03003173
3174 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003175
3176 vcpu->mmio_needed = 1;
3177 vcpu->mmio_phys_addr = gpa;
3178 vcpu->mmio_size = bytes;
3179 vcpu->mmio_is_write = 0;
3180
3181 return X86EMUL_UNHANDLEABLE;
3182}
3183
Marcelo Tosatti3200f402008-03-29 20:17:59 -03003184int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
Avi Kivity9f811282008-03-02 14:06:05 +02003185 const void *val, int bytes)
3186{
3187 int ret;
3188
3189 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3190 if (ret < 0)
3191 return 0;
Marcelo Tosattiad218f82008-12-01 22:32:05 -02003192 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
Avi Kivity9f811282008-03-02 14:06:05 +02003193 return 1;
3194}
3195
Carsten Ottebbd9b642007-10-30 18:44:21 +01003196static int emulator_write_emulated_onepage(unsigned long addr,
3197 const void *val,
3198 unsigned int bytes,
3199 struct kvm_vcpu *vcpu)
3200{
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003201 gpa_t gpa;
Gleb Natapov1871c602010-02-10 14:21:32 +02003202 u32 error_code;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003203
Gleb Natapov1871c602010-02-10 14:21:32 +02003204 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003205
3206 if (gpa == UNMAPPED_GVA) {
Gleb Natapov1871c602010-02-10 14:21:32 +02003207 kvm_inject_page_fault(vcpu, addr, error_code);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003208 return X86EMUL_PROPAGATE_FAULT;
3209 }
3210
3211 /* For APIC access vmexit */
3212 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3213 goto mmio;
3214
3215 if (emulator_write_phys(vcpu, gpa, val, bytes))
3216 return X86EMUL_CONTINUE;
3217
3218mmio:
Avi Kivityaec51dc2009-07-01 16:01:02 +03003219 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003220 /*
3221 * Is this MMIO handled locally?
3222 */
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003223 if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
Carsten Ottebbd9b642007-10-30 18:44:21 +01003224 return X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003225
3226 vcpu->mmio_needed = 1;
3227 vcpu->mmio_phys_addr = gpa;
3228 vcpu->mmio_size = bytes;
3229 vcpu->mmio_is_write = 1;
3230 memcpy(vcpu->mmio_data, val, bytes);
3231
3232 return X86EMUL_CONTINUE;
3233}
3234
3235int emulator_write_emulated(unsigned long addr,
3236 const void *val,
3237 unsigned int bytes,
3238 struct kvm_vcpu *vcpu)
3239{
3240 /* Crossing a page boundary? */
3241 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3242 int rc, now;
3243
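 /* handle only the bytes up to the next page boundary first */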
3244 now = -addr & ~PAGE_MASK;
3245 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
3246 if (rc != X86EMUL_CONTINUE)
3247 return rc;
3248 addr += now;
3249 val += now;
3250 bytes -= now;
3251 }
3252 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
3253}
3254EXPORT_SYMBOL_GPL(emulator_write_emulated);
3255
3256static int emulator_cmpxchg_emulated(unsigned long addr,
3257 const void *old,
3258 const void *new,
3259 unsigned int bytes,
3260 struct kvm_vcpu *vcpu)
3261{
Marcin Slusarz9f51e242009-08-09 21:54:00 +02003262 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003263#ifndef CONFIG_X86_64
3264 /* a guest's cmpxchg8b has to be emulated atomically */
3265 if (bytes == 8) {
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003266 gpa_t gpa;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003267 struct page *page;
Andrew Mortonc0b49b02008-02-04 22:27:18 -08003268 char *kaddr;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003269 u64 val;
3270
Gleb Natapov1871c602010-02-10 14:21:32 +02003271 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003272
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003273 if (gpa == UNMAPPED_GVA ||
3274 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3275 goto emul_write;
3276
3277 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3278 goto emul_write;
3279
3280 val = *(u64 *)new;
Izik Eidus72dc67a2008-02-10 18:04:15 +02003281
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003282 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02003283
Andrew Mortonc0b49b02008-02-04 22:27:18 -08003284 kaddr = kmap_atomic(page, KM_USER0);
3285 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
3286 kunmap_atomic(kaddr, KM_USER0);
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003287 kvm_release_page_dirty(page);
3288 }
Marcelo Tosatti3200f402008-03-29 20:17:59 -03003289emul_write:
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003290#endif
3291
Carsten Ottebbd9b642007-10-30 18:44:21 +01003292 return emulator_write_emulated(addr, new, bytes, vcpu);
3293}
3294
3295static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3296{
3297 return kvm_x86_ops->get_segment_base(vcpu, seg);
3298}
3299
3300int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3301{
Marcelo Tosattia7052892008-09-23 13:18:35 -03003302 kvm_mmu_invlpg(vcpu, address);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003303 return X86EMUL_CONTINUE;
3304}
3305
3306int emulate_clts(struct kvm_vcpu *vcpu)
3307{
Avi Kivity4d4ec082009-12-29 18:07:30 +02003308 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
Avi Kivity6b52d182010-01-21 15:31:47 +02003309 kvm_x86_ops->fpu_activate(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003310 return X86EMUL_CONTINUE;
3311}
3312
3313int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
3314{
Jan Kiszkac76de352010-01-20 18:20:20 +01003315 return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003316}
3317
3318int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
3319{
3320 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003321
Jan Kiszkac76de352010-01-20 18:20:20 +01003322 return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003323}
3324
3325void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
3326{
Carsten Ottebbd9b642007-10-30 18:44:21 +01003327 u8 opcodes[4];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003328 unsigned long rip = kvm_rip_read(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003329 unsigned long rip_linear;
3330
Avi Kivityf76c7102008-06-13 22:45:42 +03003331 if (!printk_ratelimit())
Carsten Ottebbd9b642007-10-30 18:44:21 +01003332 return;
3333
Glauber Costa25be4602008-06-10 10:46:53 -03003334 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
3335
Gleb Natapov1871c602010-02-10 14:21:32 +02003336 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003337
3338 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
3339 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003340}
3341EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
3342
Harvey Harrison14af3f32008-02-19 10:25:50 -08003343static struct x86_emulate_ops emulate_ops = {
Gleb Natapov1871c602010-02-10 14:21:32 +02003344 .read_std = kvm_read_guest_virt_system,
3345 .fetch = kvm_fetch_guest_virt,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003346 .read_emulated = emulator_read_emulated,
3347 .write_emulated = emulator_write_emulated,
3348 .cmpxchg_emulated = emulator_cmpxchg_emulated,
3349};
3350
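/*
 * Read RAX, RSP and RIP through the register cache (forcing any lazily
 * cached values to be loaded) and mark all registers dirty, since the
 * emulator still accesses vcpu->arch.regs[] directly (see the TODO in
 * emulate_instruction() below).
 */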
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003351static void cache_all_regs(struct kvm_vcpu *vcpu)
3352{
3353 kvm_register_read(vcpu, VCPU_REGS_RAX);
3354 kvm_register_read(vcpu, VCPU_REGS_RSP);
3355 kvm_register_read(vcpu, VCPU_REGS_RIP);
3356 vcpu->arch.regs_dirty = ~0;
3357}
3358
Carsten Ottebbd9b642007-10-30 18:44:21 +01003359int emulate_instruction(struct kvm_vcpu *vcpu,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003360 unsigned long cr2,
3361 u16 error_code,
Sheng Yang571008d2008-01-02 14:49:22 +08003362 int emulation_type)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003363{
Glauber Costa310b5d32009-05-12 16:21:06 -04003364 int r, shadow_mask;
Sheng Yang571008d2008-01-02 14:49:22 +08003365 struct decode_cache *c;
Avi Kivity851ba692009-08-24 11:10:17 +03003366 struct kvm_run *run = vcpu->run;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003367
Avi Kivity26eef702008-07-03 14:59:22 +03003368 kvm_clear_exception_queue(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003369 vcpu->arch.mmio_fault_cr2 = cr2;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003370 /*
Avi Kivity56e82312009-08-12 15:04:37 +03003371 * TODO: fix emulate.c to use guest_read/write_register
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003372	 * instead of direct ->regs accesses; this can save hundreds of cycles
3373	 * on Intel for instructions that don't read/change RSP, for
3374	 * example.
3375 */
3376 cache_all_regs(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003377
3378 vcpu->mmio_is_write = 0;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003379 vcpu->arch.pio.string = 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003380
Sheng Yang571008d2008-01-02 14:49:22 +08003381 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
Carsten Ottebbd9b642007-10-30 18:44:21 +01003382 int cs_db, cs_l;
3383 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3384
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003385 vcpu->arch.emulate_ctxt.vcpu = vcpu;
Jan Kiszka91586a32009-10-05 13:07:21 +02003386 vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003387 vcpu->arch.emulate_ctxt.mode =
Gleb Natapova0044752010-02-10 14:21:31 +02003388 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003389 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
Gleb Natapova0044752010-02-10 14:21:31 +02003390 ? X86EMUL_MODE_VM86 : cs_l
Carsten Ottebbd9b642007-10-30 18:44:21 +01003391 ? X86EMUL_MODE_PROT64 : cs_db
3392 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3393
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003394 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Sheng Yang571008d2008-01-02 14:49:22 +08003395
Andre Przywara0cb57622009-06-17 15:50:31 +02003396 /* Only allow emulation of specific instructions on #UD
3397	 * (namely VMMCALL, sysenter, sysexit, syscall) */
Sheng Yang571008d2008-01-02 14:49:22 +08003398 c = &vcpu->arch.emulate_ctxt.decode;
Andre Przywara0cb57622009-06-17 15:50:31 +02003399 if (emulation_type & EMULTYPE_TRAP_UD) {
3400 if (!c->twobyte)
3401 return EMULATE_FAIL;
3402 switch (c->b) {
3403 case 0x01: /* VMMCALL */
3404 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3405 return EMULATE_FAIL;
3406 break;
3407 case 0x34: /* sysenter */
3408 case 0x35: /* sysexit */
3409 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3410 return EMULATE_FAIL;
3411 break;
3412 case 0x05: /* syscall */
3413 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3414 return EMULATE_FAIL;
3415 break;
3416 default:
3417 return EMULATE_FAIL;
3418 }
3419
3420 if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
3421 return EMULATE_FAIL;
3422 }
Sheng Yang571008d2008-01-02 14:49:22 +08003423
Avi Kivityf2b57562007-11-18 15:17:51 +02003424 ++vcpu->stat.insn_emulation;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003425 if (r) {
Avi Kivityf2b57562007-11-18 15:17:51 +02003426 ++vcpu->stat.insn_emulation_fail;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003427 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3428 return EMULATE_DONE;
3429 return EMULATE_FAIL;
3430 }
3431 }
3432
Gleb Natapovba8afb62009-04-12 13:36:57 +03003433 if (emulation_type & EMULTYPE_SKIP) {
3434 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
3435 return EMULATE_DONE;
3436 }
3437
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003438 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Glauber Costa310b5d32009-05-12 16:21:06 -04003439 shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
3440
3441 if (r == 0)
3442 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003443
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003444 if (vcpu->arch.pio.string)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003445 return EMULATE_DO_MMIO;
3446
3447 if ((r || vcpu->mmio_is_write) && run) {
3448 run->exit_reason = KVM_EXIT_MMIO;
3449 run->mmio.phys_addr = vcpu->mmio_phys_addr;
3450 memcpy(run->mmio.data, vcpu->mmio_data, 8);
3451 run->mmio.len = vcpu->mmio_size;
3452 run->mmio.is_write = vcpu->mmio_is_write;
3453 }
3454
3455 if (r) {
3456 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3457 return EMULATE_DONE;
3458 if (!vcpu->mmio_needed) {
3459 kvm_report_emulation_failure(vcpu, "mmio");
3460 return EMULATE_FAIL;
3461 }
3462 return EMULATE_DO_MMIO;
3463 }
3464
Jan Kiszka91586a32009-10-05 13:07:21 +02003465 kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003466
3467 if (vcpu->mmio_is_write) {
3468 vcpu->mmio_needed = 0;
3469 return EMULATE_DO_MMIO;
3470 }
3471
3472 return EMULATE_DONE;
3473}
3474EXPORT_SYMBOL_GPL(emulate_instruction);
3475
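/*
 * Copy string-I/O data between the guest buffer (pio.guest_gva) and the
 * shared pio_data page: for IN the data is written into guest memory,
 * for OUT it is read from guest memory.
 */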
Carsten Ottede7d7892007-10-30 18:44:25 +01003476static int pio_copy_data(struct kvm_vcpu *vcpu)
3477{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003478 void *p = vcpu->arch.pio_data;
Izik Eidus0f346072008-12-29 01:42:20 +02003479 gva_t q = vcpu->arch.pio.guest_gva;
Carsten Ottede7d7892007-10-30 18:44:25 +01003480 unsigned bytes;
Izik Eidus0f346072008-12-29 01:42:20 +02003481 int ret;
Gleb Natapov1871c602010-02-10 14:21:32 +02003482 u32 error_code;
Carsten Ottede7d7892007-10-30 18:44:25 +01003483
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003484 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
3485 if (vcpu->arch.pio.in)
Gleb Natapov1871c602010-02-10 14:21:32 +02003486 ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
Carsten Ottede7d7892007-10-30 18:44:25 +01003487 else
Gleb Natapov1871c602010-02-10 14:21:32 +02003488 ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
3489
3490 if (ret == X86EMUL_PROPAGATE_FAULT)
3491 kvm_inject_page_fault(vcpu, q, error_code);
3492
Izik Eidus0f346072008-12-29 01:42:20 +02003493 return ret;
Carsten Ottede7d7892007-10-30 18:44:25 +01003494}
3495
3496int complete_pio(struct kvm_vcpu *vcpu)
3497{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003498 struct kvm_pio_request *io = &vcpu->arch.pio;
Carsten Ottede7d7892007-10-30 18:44:25 +01003499 long delta;
3500 int r;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003501 unsigned long val;
Carsten Ottede7d7892007-10-30 18:44:25 +01003502
3503 if (!io->string) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003504 if (io->in) {
3505 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3506 memcpy(&val, vcpu->arch.pio_data, io->size);
3507 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
3508 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003509 } else {
3510 if (io->in) {
3511 r = pio_copy_data(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003512 if (r)
Gleb Natapov1871c602010-02-10 14:21:32 +02003513 goto out;
Carsten Ottede7d7892007-10-30 18:44:25 +01003514 }
3515
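 /*
 * Adjust the string-I/O registers: for REP, RCX is decremented by the
 * number of completed iterations; RDI (for INS) or RSI (for OUTS) then
 * advances by count * size, backwards when io->down is set.
 */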
3516 delta = 1;
3517 if (io->rep) {
3518 delta *= io->cur_count;
3519 /*
3520 * The size of the register should really depend on
3521 * current address size.
3522 */
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003523 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
3524 val -= delta;
3525 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
Carsten Ottede7d7892007-10-30 18:44:25 +01003526 }
3527 if (io->down)
3528 delta = -delta;
3529 delta *= io->size;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003530 if (io->in) {
3531 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
3532 val += delta;
3533 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
3534 } else {
3535 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
3536 val += delta;
3537 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
3538 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003539 }
Gleb Natapov1871c602010-02-10 14:21:32 +02003540out:
Carsten Ottede7d7892007-10-30 18:44:25 +01003541 io->count -= io->cur_count;
3542 io->cur_count = 0;
3543
3544 return 0;
3545}
3546
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003547static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
Carsten Ottede7d7892007-10-30 18:44:25 +01003548{
3549	/* TODO: String I/O for in-kernel device */
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003550 int r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003551
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003552 if (vcpu->arch.pio.in)
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003553 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003554 vcpu->arch.pio.size, pd);
Carsten Ottede7d7892007-10-30 18:44:25 +01003555 else
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003556 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3557 vcpu->arch.pio.port, vcpu->arch.pio.size,
3558 pd);
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003559 return r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003560}
3561
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003562static int pio_string_write(struct kvm_vcpu *vcpu)
Carsten Ottede7d7892007-10-30 18:44:25 +01003563{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003564 struct kvm_pio_request *io = &vcpu->arch.pio;
3565 void *pd = vcpu->arch.pio_data;
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003566 int i, r = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003567
Carsten Ottede7d7892007-10-30 18:44:25 +01003568 for (i = 0; i < io->cur_count; i++) {
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003569 if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003570 io->port, io->size, pd)) {
3571 r = -EOPNOTSUPP;
3572 break;
3573 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003574 pd += io->size;
3575 }
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003576 return r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003577}
3578
Avi Kivity851ba692009-08-24 11:10:17 +03003579int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
Carsten Ottede7d7892007-10-30 18:44:25 +01003580{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003581 unsigned long val;
Carsten Ottede7d7892007-10-30 18:44:25 +01003582
Gleb Natapovf850e2e2010-02-10 14:21:33 +02003583 trace_kvm_pio(!in, port, size, 1);
3584
Carsten Ottede7d7892007-10-30 18:44:25 +01003585 vcpu->run->exit_reason = KVM_EXIT_IO;
3586 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003587 vcpu->run->io.size = vcpu->arch.pio.size = size;
Carsten Ottede7d7892007-10-30 18:44:25 +01003588 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003589 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
3590 vcpu->run->io.port = vcpu->arch.pio.port = port;
3591 vcpu->arch.pio.in = in;
3592 vcpu->arch.pio.string = 0;
3593 vcpu->arch.pio.down = 0;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003594 vcpu->arch.pio.rep = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003595
Takuya Yoshikawa1976d2d2010-02-05 17:52:46 +09003596 if (!vcpu->arch.pio.in) {
3597 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3598 memcpy(vcpu->arch.pio_data, &val, 4);
3599 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003600
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003601 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003602 complete_pio(vcpu);
3603 return 1;
3604 }
3605 return 0;
3606}
3607EXPORT_SYMBOL_GPL(kvm_emulate_pio);
3608
Avi Kivity851ba692009-08-24 11:10:17 +03003609int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
Carsten Ottede7d7892007-10-30 18:44:25 +01003610 int size, unsigned long count, int down,
3611 gva_t address, int rep, unsigned port)
3612{
3613 unsigned now, in_page;
Izik Eidus0f346072008-12-29 01:42:20 +02003614 int ret = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003615
Gleb Natapovf850e2e2010-02-10 14:21:33 +02003616 trace_kvm_pio(!in, port, size, count);
3617
Carsten Ottede7d7892007-10-30 18:44:25 +01003618 vcpu->run->exit_reason = KVM_EXIT_IO;
3619 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003620 vcpu->run->io.size = vcpu->arch.pio.size = size;
Carsten Ottede7d7892007-10-30 18:44:25 +01003621 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003622 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
3623 vcpu->run->io.port = vcpu->arch.pio.port = port;
3624 vcpu->arch.pio.in = in;
3625 vcpu->arch.pio.string = 1;
3626 vcpu->arch.pio.down = down;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003627 vcpu->arch.pio.rep = rep;
Carsten Ottede7d7892007-10-30 18:44:25 +01003628
3629 if (!count) {
3630 kvm_x86_ops->skip_emulated_instruction(vcpu);
3631 return 1;
3632 }
3633
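 /*
 * Process at most the part of the transfer that fits in the current
 * page; if more remains, RIP is not advanced below, so the instruction
 * is re-executed for the rest.
 */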
3634 if (!down)
3635 in_page = PAGE_SIZE - offset_in_page(address);
3636 else
3637 in_page = offset_in_page(address) + size;
3638 now = min(count, (unsigned long)in_page / size);
Izik Eidus0f346072008-12-29 01:42:20 +02003639 if (!now)
Carsten Ottede7d7892007-10-30 18:44:25 +01003640 now = 1;
Carsten Ottede7d7892007-10-30 18:44:25 +01003641 if (down) {
3642 /*
3643 * String I/O in reverse. Yuck. Kill the guest, fix later.
3644 */
3645 pr_unimpl(vcpu, "guest string pio down\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003646 kvm_inject_gp(vcpu, 0);
Carsten Ottede7d7892007-10-30 18:44:25 +01003647 return 1;
3648 }
3649 vcpu->run->io.count = now;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003650 vcpu->arch.pio.cur_count = now;
Carsten Ottede7d7892007-10-30 18:44:25 +01003651
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003652 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
Carsten Ottede7d7892007-10-30 18:44:25 +01003653 kvm_x86_ops->skip_emulated_instruction(vcpu);
3654
Izik Eidus0f346072008-12-29 01:42:20 +02003655 vcpu->arch.pio.guest_gva = address;
Carsten Ottede7d7892007-10-30 18:44:25 +01003656
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003657 if (!vcpu->arch.pio.in) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003658 /* string PIO write */
3659 ret = pio_copy_data(vcpu);
Gleb Natapov1871c602010-02-10 14:21:32 +02003660 if (ret == X86EMUL_PROPAGATE_FAULT)
Izik Eidus0f346072008-12-29 01:42:20 +02003661 return 1;
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003662 if (ret == 0 && !pio_string_write(vcpu)) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003663 complete_pio(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003664 if (vcpu->arch.pio.count == 0)
Carsten Ottede7d7892007-10-30 18:44:25 +01003665 ret = 1;
3666 }
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003667 }
3668 /* no string PIO read support yet */
Carsten Ottede7d7892007-10-30 18:44:25 +01003669
3670 return ret;
3671}
3672EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
3673
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003674static void bounce_off(void *info)
3675{
3676 /* nothing */
3677}
3678
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003679static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
3680 void *data)
3681{
3682 struct cpufreq_freqs *freq = data;
3683 struct kvm *kvm;
3684 struct kvm_vcpu *vcpu;
3685 int i, send_ipi = 0;
3686
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003687 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
3688 return 0;
3689 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
3690 return 0;
Zachary Amsden0cca7902009-09-29 11:38:35 -10003691 per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003692
3693 spin_lock(&kvm_lock);
3694 list_for_each_entry(kvm, &vm_list, vm_list) {
Gleb Natapov988a2ca2009-06-09 15:56:29 +03003695 kvm_for_each_vcpu(i, vcpu, kvm) {
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003696 if (vcpu->cpu != freq->cpu)
3697 continue;
3698 if (!kvm_request_guest_time_update(vcpu))
3699 continue;
3700 if (vcpu->cpu != smp_processor_id())
3701 send_ipi++;
3702 }
3703 }
3704 spin_unlock(&kvm_lock);
3705
3706 if (freq->old < freq->new && send_ipi) {
3707 /*
3708	 * We upscale the frequency.  We must make sure the guest
3709	 * doesn't see old kvmclock values while running with
3710	 * the new frequency; otherwise we risk the guest seeing
3711	 * time go backwards.
3712 *
3713	 * If we update the frequency for another cpu
3714	 * (which might be in guest context), send an interrupt
3715 * to kick the cpu out of guest context. Next time
3716 * guest context is entered kvmclock will be updated,
3717 * so the guest will not see stale values.
3718 */
3719 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
3720 }
3721 return 0;
3722}
3723
3724static struct notifier_block kvmclock_cpufreq_notifier_block = {
3725 .notifier_call = kvmclock_cpufreq_notifier
3726};
3727
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003728static void kvm_timer_init(void)
3729{
3730 int cpu;
3731
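 /*
 * If the TSC frequency varies with cpufreq, register a transition
 * notifier and record each online cpu's current frequency; otherwise
 * every cpu simply runs at tsc_khz.
 */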
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003732 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003733 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3734 CPUFREQ_TRANSITION_NOTIFIER);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10003735 for_each_online_cpu(cpu) {
3736 unsigned long khz = cpufreq_get(cpu);
3737 if (!khz)
3738 khz = tsc_khz;
3739 per_cpu(cpu_tsc_khz, cpu) = khz;
3740 }
Zachary Amsden0cca7902009-09-29 11:38:35 -10003741 } else {
3742 for_each_possible_cpu(cpu)
3743 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003744 }
3745}
3746
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08003747static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
3748
3749static int kvm_is_in_guest(void)
3750{
3751 return percpu_read(current_vcpu) != NULL;
3752}
3753
3754static int kvm_is_user_mode(void)
3755{
3756 int user_mode = 3;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08003757
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08003758 if (percpu_read(current_vcpu))
3759 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08003760
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08003761 return user_mode != 0;
3762}
3763
3764static unsigned long kvm_get_guest_ip(void)
3765{
3766 unsigned long ip = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08003767
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08003768 if (percpu_read(current_vcpu))
3769 ip = kvm_rip_read(percpu_read(current_vcpu));
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08003770
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08003771 return ip;
3772}
3773
3774static struct perf_guest_info_callbacks kvm_guest_cbs = {
3775 .is_in_guest = kvm_is_in_guest,
3776 .is_user_mode = kvm_is_user_mode,
3777 .get_guest_ip = kvm_get_guest_ip,
3778};
3779
3780void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
3781{
3782 percpu_write(current_vcpu, vcpu);
3783}
3784EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
3785
3786void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
3787{
3788 percpu_write(current_vcpu, NULL);
3789}
3790EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
3791
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003792int kvm_arch_init(void *opaque)
Carsten Otte043405e2007-10-10 17:16:19 +02003793{
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003794 int r;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003795 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
3796
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003797 if (kvm_x86_ops) {
3798 printk(KERN_ERR "kvm: already loaded the other module\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003799 r = -EEXIST;
3800 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003801 }
3802
3803 if (!ops->cpu_has_kvm_support()) {
3804 printk(KERN_ERR "kvm: no hardware support\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003805 r = -EOPNOTSUPP;
3806 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003807 }
3808 if (ops->disabled_by_bios()) {
3809 printk(KERN_ERR "kvm: disabled by bios\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003810 r = -EOPNOTSUPP;
3811 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003812 }
3813
Avi Kivity97db56c2008-01-13 13:23:56 +02003814 r = kvm_mmu_module_init();
3815 if (r)
3816 goto out;
3817
3818 kvm_init_msr_list();
3819
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003820 kvm_x86_ops = ops;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003821 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
Sheng Yang7b523452008-04-25 21:13:50 +08003822 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3823 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08003824 PT_DIRTY_MASK, PT64_NX_MASK, 0);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003825
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003826 kvm_timer_init();
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003827
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08003828 perf_register_guest_info_callbacks(&kvm_guest_cbs);
3829
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003830 return 0;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003831
3832out:
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003833 return r;
Carsten Otte043405e2007-10-10 17:16:19 +02003834}
Hollis Blanchard8776e512007-10-31 17:24:24 -05003835
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003836void kvm_arch_exit(void)
3837{
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08003838 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
3839
Jan Kiszka888d2562009-04-17 19:24:58 +02003840 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3841 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3842 CPUFREQ_TRANSITION_NOTIFIER);
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003843 kvm_x86_ops = NULL;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003844 kvm_mmu_module_exit();
3845}
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003846
Hollis Blanchard8776e512007-10-31 17:24:24 -05003847int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3848{
3849 ++vcpu->stat.halt_exits;
3850 if (irqchip_in_kernel(vcpu->kvm)) {
Avi Kivitya4535292008-04-13 17:54:35 +03003851 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003852 return 1;
3853 } else {
3854 vcpu->run->exit_reason = KVM_EXIT_HLT;
3855 return 0;
3856 }
3857}
3858EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3859
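/*
 * A 32-bit guest passes a 64-bit gpa split across two registers; in long
 * mode the whole gpa already fits in a0.
 */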
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003860static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3861 unsigned long a1)
3862{
3863 if (is_long_mode(vcpu))
3864 return a0;
3865 else
3866 return a0 | ((gpa_t)a1 << 32);
3867}
3868
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003869int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
3870{
3871 u64 param, ingpa, outgpa, ret;
3872 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
3873 bool fast, longmode;
3874 int cs_db, cs_l;
3875
3876 /*
3877	 * A hypercall generates #UD when issued from non-zero CPL or from
3878	 * real mode, per the Hyper-V spec.
3879 */
Avi Kivity3eeb3282010-01-21 15:31:48 +02003880 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003881 kvm_queue_exception(vcpu, UD_VECTOR);
3882 return 0;
3883 }
3884
3885 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3886 longmode = is_long_mode(vcpu) && cs_l == 1;
3887
3888 if (!longmode) {
Gleb Natapovccd46932010-01-19 15:06:38 +02003889 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
3890 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
3891 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
3892 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
3893 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
3894 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003895 }
3896#ifdef CONFIG_X86_64
3897 else {
3898 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
3899 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
3900 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
3901 }
3902#endif
3903
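 /*
 * Decode the hypercall input value: bits 0-15 hold the call code,
 * bit 16 the fast-call flag, bits 32-43 the rep count and bits 48-59
 * the rep start index (matching the masks used below).
 */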
3904 code = param & 0xffff;
3905 fast = (param >> 16) & 0x1;
3906 rep_cnt = (param >> 32) & 0xfff;
3907 rep_idx = (param >> 48) & 0xfff;
3908
3909 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
3910
Gleb Natapovc25bc162010-01-17 15:51:24 +02003911 switch (code) {
3912 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
3913 kvm_vcpu_on_spin(vcpu);
3914 break;
3915 default:
3916 res = HV_STATUS_INVALID_HYPERCALL_CODE;
3917 break;
3918 }
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003919
3920 ret = res | (((u64)rep_done & 0xfff) << 32);
3921 if (longmode) {
3922 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
3923 } else {
3924 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
3925 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
3926 }
3927
3928 return 1;
3929}
3930
Hollis Blanchard8776e512007-10-31 17:24:24 -05003931int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3932{
3933 unsigned long nr, a0, a1, a2, a3, ret;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003934 int r = 1;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003935
Gleb Natapov55cd8e52010-01-17 15:51:22 +02003936 if (kvm_hv_hypercall_enabled(vcpu->kvm))
3937 return kvm_hv_hypercall(vcpu);
3938
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003939 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3940 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3941 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3942 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3943 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003944
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003945 trace_kvm_hypercall(nr, a0, a1, a2, a3);
Feng (Eric) Liu2714d1d2008-04-10 15:31:10 -04003946
Hollis Blanchard8776e512007-10-31 17:24:24 -05003947 if (!is_long_mode(vcpu)) {
3948 nr &= 0xFFFFFFFF;
3949 a0 &= 0xFFFFFFFF;
3950 a1 &= 0xFFFFFFFF;
3951 a2 &= 0xFFFFFFFF;
3952 a3 &= 0xFFFFFFFF;
3953 }
3954
Jan Kiszka07708c42009-08-03 18:43:28 +02003955 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
3956 ret = -KVM_EPERM;
3957 goto out;
3958 }
3959
Hollis Blanchard8776e512007-10-31 17:24:24 -05003960 switch (nr) {
Avi Kivityb93463a2007-10-25 16:52:32 +02003961 case KVM_HC_VAPIC_POLL_IRQ:
3962 ret = 0;
3963 break;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003964 case KVM_HC_MMU_OP:
3965 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3966 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003967 default:
3968 ret = -KVM_ENOSYS;
3969 break;
3970 }
Jan Kiszka07708c42009-08-03 18:43:28 +02003971out:
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003972 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
Amit Shahf11c3a82008-02-21 01:00:30 +05303973 ++vcpu->stat.hypercalls;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003974 return r;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003975}
3976EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
3977
3978int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3979{
3980 char instruction[3];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003981 unsigned long rip = kvm_rip_read(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003982
Hollis Blanchard8776e512007-10-31 17:24:24 -05003983 /*
3984	 * Blow out the MMU so that no other VCPU has an active mapping, which
3985	 * ensures that the updated hypercall appears atomically across all
3986	 * VCPUs.
3987 */
3988 kvm_mmu_zap_all(vcpu->kvm);
3989
Hollis Blanchard8776e512007-10-31 17:24:24 -05003990 kvm_x86_ops->patch_hypercall(vcpu, instruction);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003991
Takuya Yoshikawa7edcfac2010-02-01 22:11:52 +09003992 return emulator_write_emulated(rip, instruction, 3, vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003993}
3994
3995static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3996{
3997 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3998}
3999
4000void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4001{
4002 struct descriptor_table dt = { limit, base };
4003
4004 kvm_x86_ops->set_gdt(vcpu, &dt);
4005}
4006
4007void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4008{
4009 struct descriptor_table dt = { limit, base };
4010
4011 kvm_x86_ops->set_idt(vcpu, &dt);
4012}
4013
4014void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
4015 unsigned long *rflags)
4016{
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004017 kvm_lmsw(vcpu, msw);
Jan Kiszka91586a32009-10-05 13:07:21 +02004018 *rflags = kvm_get_rflags(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004019}
4020
4021unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
4022{
Joerg Roedel54e445c2008-04-30 17:56:02 +02004023 unsigned long value;
4024
Hollis Blanchard8776e512007-10-31 17:24:24 -05004025 switch (cr) {
4026 case 0:
Avi Kivity4d4ec082009-12-29 18:07:30 +02004027 value = kvm_read_cr0(vcpu);
Joerg Roedel54e445c2008-04-30 17:56:02 +02004028 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004029 case 2:
Joerg Roedel54e445c2008-04-30 17:56:02 +02004030 value = vcpu->arch.cr2;
4031 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004032 case 3:
Joerg Roedel54e445c2008-04-30 17:56:02 +02004033 value = vcpu->arch.cr3;
4034 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004035 case 4:
Avi Kivityfc78f512009-12-07 12:16:48 +02004036 value = kvm_read_cr4(vcpu);
Joerg Roedel54e445c2008-04-30 17:56:02 +02004037 break;
Joerg Roedel152ff9b2007-12-06 15:46:52 +01004038 case 8:
Joerg Roedel54e445c2008-04-30 17:56:02 +02004039 value = kvm_get_cr8(vcpu);
4040 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004041 default:
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004042 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004043 return 0;
4044 }
Joerg Roedel54e445c2008-04-30 17:56:02 +02004045
4046 return value;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004047}
4048
4049void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
4050 unsigned long *rflags)
4051{
4052 switch (cr) {
4053 case 0:
Avi Kivity4d4ec082009-12-29 18:07:30 +02004054 kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
Jan Kiszka91586a32009-10-05 13:07:21 +02004055 *rflags = kvm_get_rflags(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004056 break;
4057 case 2:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004058 vcpu->arch.cr2 = val;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004059 break;
4060 case 3:
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004061 kvm_set_cr3(vcpu, val);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004062 break;
4063 case 4:
Avi Kivityfc78f512009-12-07 12:16:48 +02004064 kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
Hollis Blanchard8776e512007-10-31 17:24:24 -05004065 break;
Joerg Roedel152ff9b2007-12-06 15:46:52 +01004066 case 8:
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004067 kvm_set_cr8(vcpu, val & 0xfUL);
Joerg Roedel152ff9b2007-12-06 15:46:52 +01004068 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004069 default:
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004070 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004071 }
4072}
4073
Dan Kenigsberg07716712007-11-21 17:10:04 +02004074static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
4075{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004076 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
4077 int j, nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02004078
4079 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
4080 /* when no next entry is found, the current entry[i] is reselected */
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08004081 for (j = i + 1; ; j = (j + 1) % nent) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004082 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
Dan Kenigsberg07716712007-11-21 17:10:04 +02004083 if (ej->function == e->function) {
4084 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
4085 return j;
4086 }
4087 }
4088 return 0; /* silence gcc, even though control never reaches here */
4089}
4090
4091/* find an entry with matching function, matching index (if needed), and that
4092 * should be read next (if it's stateful) */
4093static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
4094 u32 function, u32 index)
4095{
4096 if (e->function != function)
4097 return 0;
4098 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
4099 return 0;
4100 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
Amit Shah19355472009-01-14 16:56:00 +00004101 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
Dan Kenigsberg07716712007-11-21 17:10:04 +02004102 return 0;
4103 return 1;
4104}
4105
Alexander Grafd8017472008-11-25 20:17:11 +01004106struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
4107 u32 function, u32 index)
Hollis Blanchard8776e512007-10-31 17:24:24 -05004108{
4109 int i;
Alexander Grafd8017472008-11-25 20:17:11 +01004110 struct kvm_cpuid_entry2 *best = NULL;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004111
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004112 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
Alexander Grafd8017472008-11-25 20:17:11 +01004113 struct kvm_cpuid_entry2 *e;
4114
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004115 e = &vcpu->arch.cpuid_entries[i];
Dan Kenigsberg07716712007-11-21 17:10:04 +02004116 if (is_matching_cpuid_entry(e, function, index)) {
4117 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
4118 move_to_next_stateful_cpuid_entry(vcpu, i);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004119 best = e;
4120 break;
4121 }
4122 /*
4123 * Both basic or both extended?
4124 */
4125 if (((e->function ^ function) & 0x80000000) == 0)
4126 if (!best || e->function > best->function)
4127 best = e;
4128 }
Alexander Grafd8017472008-11-25 20:17:11 +01004129 return best;
4130}
Sheng Yang0e851882009-12-18 16:48:46 +08004131EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
Alexander Grafd8017472008-11-25 20:17:11 +01004132
Dong, Eddie82725b22009-03-30 16:21:08 +08004133int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
4134{
4135 struct kvm_cpuid_entry2 *best;
4136
4137 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
4138 if (best)
4139 return best->eax & 0xff;
4140 return 36;
4141}
4142
Alexander Grafd8017472008-11-25 20:17:11 +01004143void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
4144{
4145 u32 function, index;
4146 struct kvm_cpuid_entry2 *best;
4147
4148 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
4149 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4150 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
4151 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
4152 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
4153 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
4154 best = kvm_find_cpuid_entry(vcpu, function, index);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004155 if (best) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004156 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
4157 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
4158 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
4159 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004160 }
Hollis Blanchard8776e512007-10-31 17:24:24 -05004161 kvm_x86_ops->skip_emulated_instruction(vcpu);
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004162 trace_kvm_cpuid(function,
4163 kvm_register_read(vcpu, VCPU_REGS_RAX),
4164 kvm_register_read(vcpu, VCPU_REGS_RBX),
4165 kvm_register_read(vcpu, VCPU_REGS_RCX),
4166 kvm_register_read(vcpu, VCPU_REGS_RDX));
Hollis Blanchard8776e512007-10-31 17:24:24 -05004167}
4168EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
Hollis Blanchardd0752062007-10-31 17:24:25 -05004169
4170/*
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004171 * Check whether userspace requested an interrupt window and whether the
4172 * interrupt window is open.
4173 *
4174 * No need to exit to userspace if we already have an interrupt queued.
4175 */
Avi Kivity851ba692009-08-24 11:10:17 +03004176static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004177{
Gleb Natapov80618232009-04-21 17:44:56 +03004178 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
Avi Kivity851ba692009-08-24 11:10:17 +03004179 vcpu->run->request_interrupt_window &&
Gleb Natapov5df56642009-04-21 17:44:59 +03004180 kvm_arch_interrupt_allowed(vcpu));
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004181}
4182
Avi Kivity851ba692009-08-24 11:10:17 +03004183static void post_kvm_run_save(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004184{
Avi Kivity851ba692009-08-24 11:10:17 +03004185 struct kvm_run *kvm_run = vcpu->run;
4186
Jan Kiszka91586a32009-10-05 13:07:21 +02004187 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004188 kvm_run->cr8 = kvm_get_cr8(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004189 kvm_run->apic_base = kvm_get_apic_base(vcpu);
Jan Kiszka45312202008-12-11 16:54:54 +01004190 if (irqchip_in_kernel(vcpu->kvm))
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004191 kvm_run->ready_for_interrupt_injection = 1;
Jan Kiszka45312202008-12-11 16:54:54 +01004192 else
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004193 kvm_run->ready_for_interrupt_injection =
Gleb Natapovfa9726b2009-05-11 13:35:47 +03004194 kvm_arch_interrupt_allowed(vcpu) &&
4195 !kvm_cpu_has_interrupt(vcpu) &&
4196 !kvm_event_needs_reinjection(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004197}
4198
Avi Kivityb93463a2007-10-25 16:52:32 +02004199static void vapic_enter(struct kvm_vcpu *vcpu)
4200{
4201 struct kvm_lapic *apic = vcpu->arch.apic;
4202 struct page *page;
4203
4204 if (!apic || !apic->vapic_addr)
4205 return;
4206
4207 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02004208
4209 vcpu->arch.apic->vapic_page = page;
Avi Kivityb93463a2007-10-25 16:52:32 +02004210}
4211
4212static void vapic_exit(struct kvm_vcpu *vcpu)
4213{
4214 struct kvm_lapic *apic = vcpu->arch.apic;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004215 int idx;
Avi Kivityb93463a2007-10-25 16:52:32 +02004216
4217 if (!apic || !apic->vapic_addr)
4218 return;
4219
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004220 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivityb93463a2007-10-25 16:52:32 +02004221 kvm_release_page_dirty(apic->vapic_page);
4222 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004223 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivityb93463a2007-10-25 16:52:32 +02004224}
4225
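/*
 * Pass the current TPR and the highest pending interrupt (if known) down
 * to the vendor module so it can decide whether guest CR8/TPR accesses
 * still need to be intercepted.
 */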
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004226static void update_cr8_intercept(struct kvm_vcpu *vcpu)
4227{
4228 int max_irr, tpr;
4229
4230 if (!kvm_x86_ops->update_cr8_intercept)
4231 return;
4232
Avi Kivity88c808f2009-08-17 22:49:40 +03004233 if (!vcpu->arch.apic)
4234 return;
4235
Gleb Natapov8db3baa2009-05-11 13:35:54 +03004236 if (!vcpu->arch.apic->vapic_addr)
4237 max_irr = kvm_lapic_find_highest_irr(vcpu);
4238 else
4239 max_irr = -1;
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004240
4241 if (max_irr != -1)
4242 max_irr >>= 4;
4243
4244 tpr = kvm_lapic_get_cr8(vcpu);
4245
4246 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
4247}
4248
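/*
 * Re-inject a previously pending exception, NMI or interrupt first; only
 * if nothing is pending try to inject a new NMI or external interrupt,
 * provided the guest can accept it right now.
 */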
Avi Kivity851ba692009-08-24 11:10:17 +03004249static void inject_pending_event(struct kvm_vcpu *vcpu)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004250{
4251 /* try to reinject previous events if any */
Gleb Natapovb59bb7b2009-07-09 15:33:51 +03004252 if (vcpu->arch.exception.pending) {
4253 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
4254 vcpu->arch.exception.has_error_code,
4255 vcpu->arch.exception.error_code);
4256 return;
4257 }
4258
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004259 if (vcpu->arch.nmi_injected) {
4260 kvm_x86_ops->set_nmi(vcpu);
4261 return;
4262 }
4263
4264 if (vcpu->arch.interrupt.pending) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004265 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004266 return;
4267 }
4268
4269 /* try to inject new event if pending */
4270 if (vcpu->arch.nmi_pending) {
4271 if (kvm_x86_ops->nmi_allowed(vcpu)) {
4272 vcpu->arch.nmi_pending = false;
4273 vcpu->arch.nmi_injected = true;
4274 kvm_x86_ops->set_nmi(vcpu);
4275 }
4276 } else if (kvm_cpu_has_interrupt(vcpu)) {
4277 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004278 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
4279 false);
4280 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004281 }
4282 }
4283}
4284
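/*
 * Perform one guest entry/exit cycle: service deferred vcpu requests,
 * inject pending events, run the guest with interrupts disabled and then
 * hand the exit reason to the vendor-specific exit handler. A return
 * value <= 0 sends the caller back to userspace.
 */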
Avi Kivity851ba692009-08-24 11:10:17 +03004285static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004286{
4287 int r;
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03004288 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
Avi Kivity851ba692009-08-24 11:10:17 +03004289 vcpu->run->request_interrupt_window;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004290
Marcelo Tosatti2e53d632008-02-20 14:47:24 -05004291 if (vcpu->requests)
4292 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
4293 kvm_mmu_unload(vcpu);
4294
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004295 r = kvm_mmu_reload(vcpu);
4296 if (unlikely(r))
4297 goto out;
4298
Avi Kivity2f52d582008-01-16 12:49:30 +02004299 if (vcpu->requests) {
4300 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
Marcelo Tosatti2f599712008-05-27 12:10:20 -03004301 __kvm_migrate_timers(vcpu);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004302 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
4303 kvm_write_guest_time(vcpu);
Marcelo Tosatti4731d4c2008-09-23 13:18:39 -03004304 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
4305 kvm_mmu_sync_roots(vcpu);
Marcelo Tosattid4acf7e2008-06-06 16:37:35 -03004306 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
4307 kvm_x86_ops->tlb_flush(vcpu);
Avi Kivityb93463a2007-10-25 16:52:32 +02004308 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
4309 &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03004310 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
Avi Kivityb93463a2007-10-25 16:52:32 +02004311 r = 0;
4312 goto out;
4313 }
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01004314 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03004315 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01004316 r = 0;
4317 goto out;
4318 }
Avi Kivity02daab22009-12-30 12:40:26 +02004319 if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
4320 vcpu->fpu_active = 0;
4321 kvm_x86_ops->fpu_deactivate(vcpu);
4322 }
Avi Kivity2f52d582008-01-16 12:49:30 +02004323 }
Avi Kivityb93463a2007-10-25 16:52:32 +02004324
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004325 preempt_disable();
4326
4327 kvm_x86_ops->prepare_guest_switch(vcpu);
Avi Kivity2608d7a2010-01-21 15:31:45 +02004328 if (vcpu->fpu_active)
4329 kvm_load_guest_fpu(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004330
4331 local_irq_disable();
4332
Marcelo Tosatti32f88402009-05-07 17:55:12 -03004333 clear_bit(KVM_REQ_KICK, &vcpu->requests);
4334 smp_mb__after_clear_bit();
4335
Marcelo Tosattid7690172008-09-08 15:23:48 -03004336 if (vcpu->requests || need_resched() || signal_pending(current)) {
Gleb Natapovc7f0f242009-07-07 15:27:32 +03004337 set_bit(KVM_REQ_KICK, &vcpu->requests);
Avi Kivity6c142802008-01-15 18:27:32 +02004338 local_irq_enable();
4339 preempt_enable();
4340 r = 1;
4341 goto out;
4342 }
4343
Avi Kivity851ba692009-08-24 11:10:17 +03004344 inject_pending_event(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004345
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03004346 /* enable NMI/IRQ window open exits if needed */
4347 if (vcpu->arch.nmi_pending)
4348 kvm_x86_ops->enable_nmi_window(vcpu);
4349 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
4350 kvm_x86_ops->enable_irq_window(vcpu);
4351
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004352 if (kvm_lapic_enabled(vcpu)) {
Gleb Natapov8db3baa2009-05-11 13:35:54 +03004353 update_cr8_intercept(vcpu);
4354 kvm_lapic_sync_to_vapic(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004355 }
Avi Kivityb93463a2007-10-25 16:52:32 +02004356
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004357 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004358
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004359 kvm_guest_enter();
4360
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004361 if (unlikely(vcpu->arch.switch_db_regs)) {
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004362 set_debugreg(0, 7);
4363 set_debugreg(vcpu->arch.eff_db[0], 0);
4364 set_debugreg(vcpu->arch.eff_db[1], 1);
4365 set_debugreg(vcpu->arch.eff_db[2], 2);
4366 set_debugreg(vcpu->arch.eff_db[3], 3);
4367 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004368
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004369 trace_kvm_entry(vcpu->vcpu_id);
Avi Kivity851ba692009-08-24 11:10:17 +03004370 kvm_x86_ops->run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004371
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004372 /*
4373 * If the guest has used debug registers, at least dr7
4374 * will be disabled while returning to the host.
4375 * If we don't have active breakpoints in the host, we don't
4376 * care about the messed up debug address registers. But if
4377 * we have some of them active, restore the old state.
4378 */
Frederic Weisbecker59d8eb52009-11-10 11:03:12 +01004379 if (hw_breakpoint_active())
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004380 hw_breakpoint_restore();
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004381
Marcelo Tosatti32f88402009-05-07 17:55:12 -03004382 set_bit(KVM_REQ_KICK, &vcpu->requests);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004383 local_irq_enable();
4384
4385 ++vcpu->stat.exits;
4386
4387 /*
4388 * We must have an instruction between local_irq_enable() and
4389 * kvm_guest_exit(), so the timer interrupt isn't delayed by
4390 * the interrupt shadow. The stat.exits increment will do nicely.
4391 * But we need to prevent reordering, hence this barrier():
4392 */
4393 barrier();
4394
4395 kvm_guest_exit();
4396
4397 preempt_enable();
4398
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004399 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004400
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004401 /*
4402 * Profile KVM exit RIPs:
4403 */
4404 if (unlikely(prof_on == KVM_PROFILING)) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004405 unsigned long rip = kvm_rip_read(vcpu);
4406 profile_hit(KVM_PROFILING, (void *)rip);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004407 }
4408
Avi Kivity298101d2007-11-25 13:41:11 +02004409
Avi Kivityb93463a2007-10-25 16:52:32 +02004410 kvm_lapic_sync_from_vapic(vcpu);
4411
Avi Kivity851ba692009-08-24 11:10:17 +03004412 r = kvm_x86_ops->handle_exit(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004413out:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004414 return r;
4415}
4416
Gleb Natapov09cec752009-03-23 15:11:44 +02004417
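/*
 * Main vcpu loop: keep entering the guest while it is RUNNABLE, block it
 * otherwise, and drop out on signals, errors or a userspace-requested
 * interrupt window.
 */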
Avi Kivity851ba692009-08-24 11:10:17 +03004418static int __vcpu_run(struct kvm_vcpu *vcpu)
Marcelo Tosattid7690172008-09-08 15:23:48 -03004419{
4420 int r;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004421 struct kvm *kvm = vcpu->kvm;
Marcelo Tosattid7690172008-09-08 15:23:48 -03004422
4423 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
Jan Kiszka1b10bf32008-09-30 10:41:06 +02004424 pr_debug("vcpu %d received sipi with vector # %x\n",
4425 vcpu->vcpu_id, vcpu->arch.sipi_vector);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004426 kvm_lapic_reset(vcpu);
Gleb Natapov5f179282008-10-07 15:42:33 +02004427 r = kvm_arch_vcpu_reset(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004428 if (r)
4429 return r;
4430 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004431 }
4432
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004433 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004434 vapic_enter(vcpu);
4435
4436 r = 1;
4437 while (r > 0) {
Gleb Natapovaf2152f2008-09-22 14:28:53 +03004438 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
Avi Kivity851ba692009-08-24 11:10:17 +03004439 r = vcpu_enter_guest(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004440 else {
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004441 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004442 kvm_vcpu_block(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004443 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004444 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
Gleb Natapov09cec752009-03-23 15:11:44 +02004445 {
4446 switch (vcpu->arch.mp_state) {
4447 case KVM_MP_STATE_HALTED:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004448 vcpu->arch.mp_state =
Gleb Natapov09cec752009-03-23 15:11:44 +02004449 KVM_MP_STATE_RUNNABLE;
4450 case KVM_MP_STATE_RUNNABLE:
4451 break;
4452 case KVM_MP_STATE_SIPI_RECEIVED:
4453 default:
4454 r = -EINTR;
4455 break;
4456 }
4457 }
Marcelo Tosattid7690172008-09-08 15:23:48 -03004458 }
4459
Gleb Natapov09cec752009-03-23 15:11:44 +02004460 if (r <= 0)
4461 break;
4462
4463 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4464 if (kvm_cpu_has_pending_timer(vcpu))
4465 kvm_inject_pending_timer_irqs(vcpu);
4466
Avi Kivity851ba692009-08-24 11:10:17 +03004467 if (dm_request_for_irq_injection(vcpu)) {
Gleb Natapov09cec752009-03-23 15:11:44 +02004468 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004469 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004470 ++vcpu->stat.request_irq_exits;
4471 }
4472 if (signal_pending(current)) {
4473 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004474 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004475 ++vcpu->stat.signal_exits;
4476 }
4477 if (need_resched()) {
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004478 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Gleb Natapov09cec752009-03-23 15:11:44 +02004479 kvm_resched(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004480 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004481 }
4482 }
4483
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004484 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Avi Kivity851ba692009-08-24 11:10:17 +03004485 post_kvm_run_save(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004486
Avi Kivityb93463a2007-10-25 16:52:32 +02004487 vapic_exit(vcpu);
4488
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004489 return r;
4490}
4491
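/*
 * Entry point for the KVM_RUN ioctl: finish any PIO or MMIO emulation
 * left over from the previous exit, then enter the vcpu run loop.
 */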
4492int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4493{
4494 int r;
4495 sigset_t sigsaved;
4496
4497 vcpu_load(vcpu);
4498
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004499 if (vcpu->sigset_active)
4500 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4501
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004502 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4503 kvm_vcpu_block(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004504 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004505 r = -EAGAIN;
4506 goto out;
4507 }
4508
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004509 /* re-sync apic's tpr */
4510 if (!irqchip_in_kernel(vcpu->kvm))
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004511 kvm_set_cr8(vcpu, kvm_run->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004512
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004513 if (vcpu->arch.pio.cur_count) {
Gleb Natapov7567cae2010-03-09 12:01:10 +02004514 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004515 r = complete_pio(vcpu);
Gleb Natapov7567cae2010-03-09 12:01:10 +02004516 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004517 if (r)
4518 goto out;
4519 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004520 if (vcpu->mmio_needed) {
4521 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4522 vcpu->mmio_read_completed = 1;
4523 vcpu->mmio_needed = 0;
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004524
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004525 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivity851ba692009-08-24 11:10:17 +03004526 r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
Sheng Yang571008d2008-01-02 14:49:22 +08004527 EMULTYPE_NO_DECODE);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004528 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004529 if (r == EMULATE_DO_MMIO) {
4530 /*
4531 * Read-modify-write. Back to userspace.
4532 */
4533 r = 0;
4534 goto out;
4535 }
4536 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004537 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4538 kvm_register_write(vcpu, VCPU_REGS_RAX,
4539 kvm_run->hypercall.ret);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004540
Avi Kivity851ba692009-08-24 11:10:17 +03004541 r = __vcpu_run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004542
4543out:
4544 if (vcpu->sigset_active)
4545 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4546
4547 vcpu_put(vcpu);
4548 return r;
4549}
4550
4551int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4552{
4553 vcpu_load(vcpu);
4554
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004555 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4556 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4557 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4558 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4559 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4560 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4561 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4562 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004563#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004564 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
4565 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
4566 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
4567 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
4568 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
4569 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
4570 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
4571 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004572#endif
4573
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004574 regs->rip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004575 regs->rflags = kvm_get_rflags(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004576
4577 vcpu_put(vcpu);
4578
4579 return 0;
4580}
4581
4582int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4583{
4584 vcpu_load(vcpu);
4585
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004586 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4587 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4588 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
4589 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
4590 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
4591 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
4592 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
4593 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004594#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004595 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
4596 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
4597 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
4598 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
4599 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
4600 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
4601 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
4602 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004603#endif
4604
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004605 kvm_rip_write(vcpu, regs->rip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004606 kvm_set_rflags(vcpu, regs->rflags);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004607
Jan Kiszkab4f14ab2008-04-30 17:59:04 +02004608 vcpu->arch.exception.pending = false;
4609
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004610 vcpu_put(vcpu);
4611
4612 return 0;
4613}
4614
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004615void kvm_get_segment(struct kvm_vcpu *vcpu,
4616 struct kvm_segment *var, int seg)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004617{
Harvey Harrison14af3f32008-02-19 10:25:50 -08004618 kvm_x86_ops->get_segment(vcpu, var, seg);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004619}
4620
4621void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
4622{
4623 struct kvm_segment cs;
4624
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004625 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004626 *db = cs.db;
4627 *l = cs.l;
4628}
4629EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
4630
4631int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4632 struct kvm_sregs *sregs)
4633{
4634 struct descriptor_table dt;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004635
4636 vcpu_load(vcpu);
4637
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004638 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4639 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4640 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4641 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4642 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4643 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004644
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004645 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4646 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004647
4648 kvm_x86_ops->get_idt(vcpu, &dt);
4649 sregs->idt.limit = dt.limit;
4650 sregs->idt.base = dt.base;
4651 kvm_x86_ops->get_gdt(vcpu, &dt);
4652 sregs->gdt.limit = dt.limit;
4653 sregs->gdt.base = dt.base;
4654
Avi Kivity4d4ec082009-12-29 18:07:30 +02004655 sregs->cr0 = kvm_read_cr0(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004656 sregs->cr2 = vcpu->arch.cr2;
4657 sregs->cr3 = vcpu->arch.cr3;
Avi Kivityfc78f512009-12-07 12:16:48 +02004658 sregs->cr4 = kvm_read_cr4(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004659 sregs->cr8 = kvm_get_cr8(vcpu);
Avi Kivityf6801df2010-01-21 15:31:50 +02004660 sregs->efer = vcpu->arch.efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004661 sregs->apic_base = kvm_get_apic_base(vcpu);
4662
Gleb Natapov923c61b2009-05-11 13:35:48 +03004663 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004664
Gleb Natapov36752c92009-05-11 13:35:53 +03004665 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
Gleb Natapov14d0bc12009-04-21 17:45:11 +03004666 set_bit(vcpu->arch.interrupt.nr,
4667 (unsigned long *)sregs->interrupt_bitmap);
Gleb Natapov16d7a192009-04-21 17:45:10 +03004668
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004669 vcpu_put(vcpu);
4670
4671 return 0;
4672}
4673
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004674int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4675 struct kvm_mp_state *mp_state)
4676{
4677 vcpu_load(vcpu);
4678 mp_state->mp_state = vcpu->arch.mp_state;
4679 vcpu_put(vcpu);
4680 return 0;
4681}
4682
4683int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4684 struct kvm_mp_state *mp_state)
4685{
4686 vcpu_load(vcpu);
4687 vcpu->arch.mp_state = mp_state->mp_state;
4688 vcpu_put(vcpu);
4689 return 0;
4690}
4691
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004692static void kvm_set_segment(struct kvm_vcpu *vcpu,
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004693 struct kvm_segment *var, int seg)
4694{
Harvey Harrison14af3f32008-02-19 10:25:50 -08004695 kvm_x86_ops->set_segment(vcpu, var, seg);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004696}
4697
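/*
 * Convert an architectural segment descriptor into the kvm_segment
 * representation, expanding page-granular limits to byte granularity.
 */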
Izik Eidus37817f22008-03-24 23:14:53 +02004698static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
4699 struct kvm_segment *kvm_desct)
4700{
Akinobu Mita46a359e2009-07-18 23:58:32 +09004701 kvm_desct->base = get_desc_base(seg_desc);
4702 kvm_desct->limit = get_desc_limit(seg_desc);
Marcelo Tosattic93cd3a2008-07-19 19:08:07 -03004703 if (seg_desc->g) {
4704 kvm_desct->limit <<= 12;
4705 kvm_desct->limit |= 0xfff;
4706 }
Izik Eidus37817f22008-03-24 23:14:53 +02004707 kvm_desct->selector = selector;
4708 kvm_desct->type = seg_desc->type;
4709 kvm_desct->present = seg_desc->p;
4710 kvm_desct->dpl = seg_desc->dpl;
4711 kvm_desct->db = seg_desc->d;
4712 kvm_desct->s = seg_desc->s;
4713 kvm_desct->l = seg_desc->l;
4714 kvm_desct->g = seg_desc->g;
4715 kvm_desct->avl = seg_desc->avl;
4716 if (!selector)
4717 kvm_desct->unusable = 1;
4718 else
4719 kvm_desct->unusable = 0;
4720 kvm_desct->padding = 0;
4721}
4722
Amit Shahb8222ad2008-10-22 16:39:47 +05304723static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4724 u16 selector,
4725 struct descriptor_table *dtable)
Izik Eidus37817f22008-03-24 23:14:53 +02004726{
4727 if (selector & 1 << 2) {
4728 struct kvm_segment kvm_seg;
4729
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004730 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004731
4732 if (kvm_seg.unusable)
4733 dtable->limit = 0;
4734 else
4735 dtable->limit = kvm_seg.limit;
4736 dtable->base = kvm_seg.base;
4737 }
4738 else
4739 kvm_x86_ops->get_gdt(vcpu, dtable);
4740}
4741
4742/* allowed just for 8 bytes segments */
4743static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4744 struct desc_struct *seg_desc)
4745{
4746 struct descriptor_table dtable;
4747 u16 index = selector >> 3;
Takuya Yoshikawa6f550482010-02-18 12:15:00 +02004748 int ret;
4749 u32 err;
4750 gva_t addr;
Izik Eidus37817f22008-03-24 23:14:53 +02004751
Amit Shahb8222ad2008-10-22 16:39:47 +05304752 get_segment_descriptor_dtable(vcpu, selector, &dtable);
Izik Eidus37817f22008-03-24 23:14:53 +02004753
4754 if (dtable.limit < index * 8 + 7) {
4755 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
Takuya Yoshikawac125c602010-02-01 22:11:04 +09004756 return X86EMUL_PROPAGATE_FAULT;
Izik Eidus37817f22008-03-24 23:14:53 +02004757 }
Takuya Yoshikawa6f550482010-02-18 12:15:00 +02004758 addr = dtable.base + index * 8;
4759 ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
4760 vcpu, &err);
4761 if (ret == X86EMUL_PROPAGATE_FAULT)
4762 kvm_inject_page_fault(vcpu, addr, err);
4763
4764 return ret;
Izik Eidus37817f22008-03-24 23:14:53 +02004765}
4766
4767/* allowed just for 8 bytes segments */
4768static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4769 struct desc_struct *seg_desc)
4770{
4771 struct descriptor_table dtable;
4772 u16 index = selector >> 3;
4773
Amit Shahb8222ad2008-10-22 16:39:47 +05304774 get_segment_descriptor_dtable(vcpu, selector, &dtable);
Izik Eidus37817f22008-03-24 23:14:53 +02004775
4776 if (dtable.limit < index * 8 + 7)
4777 return 1;
Gleb Natapov1871c602010-02-10 14:21:32 +02004778 return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
Izik Eidus37817f22008-03-24 23:14:53 +02004779}
4780
Gleb Natapov1871c602010-02-10 14:21:32 +02004781static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
4782 struct desc_struct *seg_desc)
4783{
4784 u32 base_addr = get_desc_base(seg_desc);
4785
4786 return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
4787}
4788
4789static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
Izik Eidus37817f22008-03-24 23:14:53 +02004790 struct desc_struct *seg_desc)
4791{
Akinobu Mita46a359e2009-07-18 23:58:32 +09004792 u32 base_addr = get_desc_base(seg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02004793
Gleb Natapov1871c602010-02-10 14:21:32 +02004794 return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
Izik Eidus37817f22008-03-24 23:14:53 +02004795}
4796
Izik Eidus37817f22008-03-24 23:14:53 +02004797static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
4798{
4799 struct kvm_segment kvm_seg;
4800
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004801 kvm_get_segment(vcpu, &kvm_seg, seg);
Izik Eidus37817f22008-03-24 23:14:53 +02004802 return kvm_seg.selector;
4803}
4804
Harvey Harrison2259e3a2008-08-22 13:29:17 -07004805static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
Avi Kivityf4bbd9a2008-08-20 15:51:42 +03004806{
4807 struct kvm_segment segvar = {
4808 .base = selector << 4,
4809 .limit = 0xffff,
4810 .selector = selector,
4811 .type = 3,
4812 .present = 1,
4813 .dpl = 3,
4814 .db = 0,
4815 .s = 1,
4816 .l = 0,
4817 .g = 0,
4818 .avl = 0,
4819 .unusable = 0,
4820 };
4821 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
Gleb Natapovc6975182010-02-18 12:15:01 +02004822 return X86EMUL_CONTINUE;
Avi Kivityf4bbd9a2008-08-20 15:51:42 +03004823}
4824
Anthony Liguoric0c7c042009-08-11 15:57:59 -05004825static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4826{
4827 return (seg != VCPU_SREG_LDTR) &&
4828 (seg != VCPU_SREG_TR) &&
Jan Kiszka91586a32009-10-05 13:07:21 +02004829 (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
Anthony Liguoric0c7c042009-08-11 15:57:59 -05004830}
4831
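/*
 * Load a segment register in protected mode: fetch the descriptor, apply
 * the null-selector, present, type and privilege checks described in the
 * SDM, mark the descriptor accessed and finally install the segment.
 * Real and VM86 mode fall back to kvm_load_realmode_segment().
 */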
Gleb Natapovc6975182010-02-18 12:15:01 +02004832int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
Izik Eidus37817f22008-03-24 23:14:53 +02004833{
4834 struct kvm_segment kvm_seg;
Gleb Natapove01c2422010-01-25 12:01:04 +02004835 struct desc_struct seg_desc;
Gleb Natapovc6975182010-02-18 12:15:01 +02004836 u8 dpl, rpl, cpl;
4837 unsigned err_vec = GP_VECTOR;
4838 u32 err_code = 0;
4839 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
4840 int ret;
Izik Eidus37817f22008-03-24 23:14:53 +02004841
Avi Kivity3eeb3282010-01-21 15:31:48 +02004842 if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
Avi Kivityf4bbd9a2008-08-20 15:51:42 +03004843 return kvm_load_realmode_segment(vcpu, selector, seg);
Gleb Natapove01c2422010-01-25 12:01:04 +02004844
Gleb Natapovc6975182010-02-18 12:15:01 +02004845 /* NULL selector is not valid for TR, CS and SS */
4846 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
4847 && null_selector)
4848 goto exception;
4849
4850 /* TR should be in GDT only */
4851 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
4852 goto exception;
4853
4854 ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
4855 if (ret)
4856 return ret;
4857
Gleb Natapove01c2422010-01-25 12:01:04 +02004858 seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
Marcelo Tosatticb84b552009-11-11 17:29:49 -02004859
Gleb Natapovc6975182010-02-18 12:15:01 +02004860 if (null_selector) { /* for NULL selector skip all following checks */
4861 kvm_seg.unusable = 1;
4862 goto load;
4863 }
Izik Eidus37817f22008-03-24 23:14:53 +02004864
Gleb Natapovc6975182010-02-18 12:15:01 +02004865 err_code = selector & 0xfffc;
4866 err_vec = GP_VECTOR;
Izik Eidus37817f22008-03-24 23:14:53 +02004867
Gleb Natapovc6975182010-02-18 12:15:01 +02004868 /* can't load a system descriptor into a segment selector */
4869 if (seg <= VCPU_SREG_GS && !kvm_seg.s)
4870 goto exception;
4871
4872 if (!kvm_seg.present) {
4873 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
4874 goto exception;
4875 }
4876
4877 rpl = selector & 3;
4878 dpl = kvm_seg.dpl;
4879 cpl = kvm_x86_ops->get_cpl(vcpu);
4880
4881 switch (seg) {
4882 case VCPU_SREG_SS:
4883 /*
4884 * segment is not a writable data segment, or the segment
4885 * selector's RPL != CPL, or the segment descriptor's DPL != CPL
4886 */
4887 if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
4888 goto exception;
4889 break;
4890 case VCPU_SREG_CS:
4891 if (!(kvm_seg.type & 8))
4892 goto exception;
4893
4894 if (kvm_seg.type & 4) {
4895 /* conforming */
4896 if (dpl > cpl)
4897 goto exception;
4898 } else {
4899 /* nonconforming */
4900 if (rpl > cpl || dpl != cpl)
4901 goto exception;
4902 }
4903 /* CS(RPL) <- CPL */
4904 selector = (selector & 0xfffc) | cpl;
4905 break;
4906 case VCPU_SREG_TR:
4907 if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
4908 goto exception;
4909 break;
4910 case VCPU_SREG_LDTR:
4911 if (kvm_seg.s || kvm_seg.type != 2)
4912 goto exception;
4913 break;
4914 default: /* DS, ES, FS, or GS */
4915 /*
4916 * segment is not a data or readable code segment or
4917 * ((segment is a data or nonconforming code segment)
4918 * and (both RPL and CPL > DPL))
4919 */
4920 if ((kvm_seg.type & 0xa) == 0x8 ||
4921 (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
4922 goto exception;
4923 break;
4924 }
4925
4926 if (!kvm_seg.unusable && kvm_seg.s) {
Gleb Natapove01c2422010-01-25 12:01:04 +02004927 /* mark segment as accessed */
Gleb Natapovc6975182010-02-18 12:15:01 +02004928 kvm_seg.type |= 1;
Gleb Natapove01c2422010-01-25 12:01:04 +02004929 seg_desc.type |= 1;
4930 save_guest_segment_descriptor(vcpu, selector, &seg_desc);
4931 }
Gleb Natapovc6975182010-02-18 12:15:01 +02004932load:
4933 kvm_set_segment(vcpu, &kvm_seg, seg);
4934 return X86EMUL_CONTINUE;
4935exception:
4936 kvm_queue_exception_e(vcpu, err_vec, err_code);
4937 return X86EMUL_PROPAGATE_FAULT;
Izik Eidus37817f22008-03-24 23:14:53 +02004938}
4939
4940static void save_state_to_tss32(struct kvm_vcpu *vcpu,
4941 struct tss_segment_32 *tss)
4942{
4943 tss->cr3 = vcpu->arch.cr3;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004944 tss->eip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004945 tss->eflags = kvm_get_rflags(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004946 tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4947 tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4948 tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4949 tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4950 tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4951 tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4952 tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4953 tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
Izik Eidus37817f22008-03-24 23:14:53 +02004954 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4955 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4956 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4957 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4958 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
4959 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
4960 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004961}
4962
Gleb Natapovc6975182010-02-18 12:15:01 +02004963static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
4964{
4965 struct kvm_segment kvm_seg;
4966 kvm_get_segment(vcpu, &kvm_seg, seg);
4967 kvm_seg.selector = sel;
4968 kvm_set_segment(vcpu, &kvm_seg, seg);
4969}
4970
Izik Eidus37817f22008-03-24 23:14:53 +02004971static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4972 struct tss_segment_32 *tss)
4973{
4974 kvm_set_cr3(vcpu, tss->cr3);
4975
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004976 kvm_rip_write(vcpu, tss->eip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004977 kvm_set_rflags(vcpu, tss->eflags | 2);
Izik Eidus37817f22008-03-24 23:14:53 +02004978
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004979 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
4980 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
4981 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
4982 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
4983 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
4984 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
4985 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4986 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
Izik Eidus37817f22008-03-24 23:14:53 +02004987
Gleb Natapovc6975182010-02-18 12:15:01 +02004988 /*
4989 * SDM says that segment selectors are loaded before segment
4990 * descriptors
4991 */
4992 kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
4993 kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
4994 kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
4995 kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
4996 kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
4997 kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
4998 kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
4999
5000 /*
5001 * Now load segment descriptors. If a fault happens at this stage
5002 * it is handled in the context of the new task
5003 */
5004 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
Izik Eidus37817f22008-03-24 23:14:53 +02005005 return 1;
5006
Gleb Natapovc6975182010-02-18 12:15:01 +02005007 if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
Izik Eidus37817f22008-03-24 23:14:53 +02005008 return 1;
5009
Gleb Natapovc6975182010-02-18 12:15:01 +02005010 if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
Izik Eidus37817f22008-03-24 23:14:53 +02005011 return 1;
5012
Gleb Natapovc6975182010-02-18 12:15:01 +02005013 if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
Izik Eidus37817f22008-03-24 23:14:53 +02005014 return 1;
5015
Gleb Natapovc6975182010-02-18 12:15:01 +02005016 if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
Izik Eidus37817f22008-03-24 23:14:53 +02005017 return 1;
5018
Gleb Natapovc6975182010-02-18 12:15:01 +02005019 if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
Izik Eidus37817f22008-03-24 23:14:53 +02005020 return 1;
5021
Gleb Natapovc6975182010-02-18 12:15:01 +02005022 if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
Izik Eidus37817f22008-03-24 23:14:53 +02005023 return 1;
5024 return 0;
5025}
5026
5027static void save_state_to_tss16(struct kvm_vcpu *vcpu,
5028 struct tss_segment_16 *tss)
5029{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03005030 tss->ip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02005031 tss->flag = kvm_get_rflags(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03005032 tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
5033 tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
5034 tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
5035 tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
5036 tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
5037 tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
5038 tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
5039 tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
Izik Eidus37817f22008-03-24 23:14:53 +02005040
5041 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
5042 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
5043 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
5044 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
5045 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02005046}
5047
5048static int load_state_from_tss16(struct kvm_vcpu *vcpu,
5049 struct tss_segment_16 *tss)
5050{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03005051 kvm_rip_write(vcpu, tss->ip);
Jan Kiszka91586a32009-10-05 13:07:21 +02005052 kvm_set_rflags(vcpu, tss->flag | 2);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03005053 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
5054 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
5055 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
5056 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
5057 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
5058 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
5059 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
5060 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
Izik Eidus37817f22008-03-24 23:14:53 +02005061
Gleb Natapovc6975182010-02-18 12:15:01 +02005062 /*
5063 * SDM says that segment selectors are loaded before segment
5064 * descriptors
5065 */
5066 kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
5067 kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
5068 kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
5069 kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
5070 kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
5071
5072 /*
5073 * Now load segment descriptors. If a fault happens at this stage
5074 * it is handled in the context of the new task
5075 */
5076 if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
Izik Eidus37817f22008-03-24 23:14:53 +02005077 return 1;
5078
Gleb Natapovc6975182010-02-18 12:15:01 +02005079 if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
Izik Eidus37817f22008-03-24 23:14:53 +02005080 return 1;
5081
Gleb Natapovc6975182010-02-18 12:15:01 +02005082 if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
Izik Eidus37817f22008-03-24 23:14:53 +02005083 return 1;
5084
Gleb Natapovc6975182010-02-18 12:15:01 +02005085 if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
Izik Eidus37817f22008-03-24 23:14:53 +02005086 return 1;
5087
Gleb Natapovc6975182010-02-18 12:15:01 +02005088 if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
Izik Eidus37817f22008-03-24 23:14:53 +02005089 return 1;
5090 return 0;
5091}
5092
Harvey Harrison8b2cf732008-04-27 12:14:13 -07005093static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
Gleb Natapovb237ac32009-03-30 16:03:24 +03005094 u16 old_tss_sel, u32 old_tss_base,
5095 struct desc_struct *nseg_desc)
Izik Eidus37817f22008-03-24 23:14:53 +02005096{
5097 struct tss_segment_16 tss_segment_16;
5098 int ret = 0;
5099
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005100 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
5101 sizeof tss_segment_16))
Izik Eidus37817f22008-03-24 23:14:53 +02005102 goto out;
5103
5104 save_state_to_tss16(vcpu, &tss_segment_16);
Izik Eidus37817f22008-03-24 23:14:53 +02005105
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005106 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
5107 sizeof tss_segment_16))
Izik Eidus37817f22008-03-24 23:14:53 +02005108 goto out;
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005109
Gleb Natapov1871c602010-02-10 14:21:32 +02005110 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005111 &tss_segment_16, sizeof tss_segment_16))
5112 goto out;
5113
Gleb Natapovb237ac32009-03-30 16:03:24 +03005114 if (old_tss_sel != 0xffff) {
5115 tss_segment_16.prev_task_link = old_tss_sel;
5116
5117 if (kvm_write_guest(vcpu->kvm,
Gleb Natapov1871c602010-02-10 14:21:32 +02005118 get_tss_base_addr_write(vcpu, nseg_desc),
Gleb Natapovb237ac32009-03-30 16:03:24 +03005119 &tss_segment_16.prev_task_link,
5120 sizeof tss_segment_16.prev_task_link))
5121 goto out;
5122 }
5123
Izik Eidus37817f22008-03-24 23:14:53 +02005124 if (load_state_from_tss16(vcpu, &tss_segment_16))
5125 goto out;
5126
5127 ret = 1;
5128out:
5129 return ret;
5130}
5131
Harvey Harrison8b2cf732008-04-27 12:14:13 -07005132static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
Gleb Natapovb237ac32009-03-30 16:03:24 +03005133 u16 old_tss_sel, u32 old_tss_base,
Izik Eidus37817f22008-03-24 23:14:53 +02005134 struct desc_struct *nseg_desc)
5135{
5136 struct tss_segment_32 tss_segment_32;
5137 int ret = 0;
5138
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005139 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
5140 sizeof tss_segment_32))
Izik Eidus37817f22008-03-24 23:14:53 +02005141 goto out;
5142
5143 save_state_to_tss32(vcpu, &tss_segment_32);
Izik Eidus37817f22008-03-24 23:14:53 +02005144
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005145 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
5146 sizeof tss_segment_32))
Izik Eidus37817f22008-03-24 23:14:53 +02005147 goto out;
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005148
Gleb Natapov1871c602010-02-10 14:21:32 +02005149 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005150 &tss_segment_32, sizeof tss_segment_32))
5151 goto out;
5152
Gleb Natapovb237ac32009-03-30 16:03:24 +03005153 if (old_tss_sel != 0xffff) {
5154 tss_segment_32.prev_task_link = old_tss_sel;
5155
5156 if (kvm_write_guest(vcpu->kvm,
Gleb Natapov1871c602010-02-10 14:21:32 +02005157 get_tss_base_addr_write(vcpu, nseg_desc),
Gleb Natapovb237ac32009-03-30 16:03:24 +03005158 &tss_segment_32.prev_task_link,
5159 sizeof tss_segment_32.prev_task_link))
5160 goto out;
5161 }
5162
Izik Eidus37817f22008-03-24 23:14:53 +02005163 if (load_state_from_tss32(vcpu, &tss_segment_32))
5164 goto out;
5165
5166 ret = 1;
5167out:
5168 return ret;
5169}
5170
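/*
 * Emulate a hardware task switch: validate the target TSS descriptor,
 * save the current register state into the old TSS, load the new state
 * from the 16- or 32-bit target TSS and update the busy and NT flags as
 * the architecture requires.
 */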
5171int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
5172{
5173 struct kvm_segment tr_seg;
5174 struct desc_struct cseg_desc;
5175 struct desc_struct nseg_desc;
5176 int ret = 0;
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005177 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
5178 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
Jan Kiszkae8861cf2010-04-14 16:57:11 +02005179 u32 desc_limit;
Izik Eidus37817f22008-03-24 23:14:53 +02005180
Gleb Natapov1871c602010-02-10 14:21:32 +02005181 old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
Izik Eidus37817f22008-03-24 23:14:53 +02005182
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005183 /* FIXME: Handle errors. Failure to read either TSS or its
5184 * descriptor should generate a page fault.
5185 */
Izik Eidus37817f22008-03-24 23:14:53 +02005186 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
5187 goto out;
5188
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005189 if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
Izik Eidus37817f22008-03-24 23:14:53 +02005190 goto out;
5191
Izik Eidus37817f22008-03-24 23:14:53 +02005192 if (reason != TASK_SWITCH_IRET) {
5193 int cpl;
5194
5195 cpl = kvm_x86_ops->get_cpl(vcpu);
5196 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
5197 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
5198 return 1;
5199 }
5200 }
5201
Jan Kiszkae8861cf2010-04-14 16:57:11 +02005202 desc_limit = get_desc_limit(&nseg_desc);
5203 if (!nseg_desc.p ||
5204 ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
5205 desc_limit < 0x2b)) {
Izik Eidus37817f22008-03-24 23:14:53 +02005206 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
5207 return 1;
5208 }
5209
5210 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
Izik Eidus3fe913e2008-04-28 18:23:52 +03005211 cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03005212 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02005213 }
5214
5215 if (reason == TASK_SWITCH_IRET) {
Jan Kiszka91586a32009-10-05 13:07:21 +02005216 u32 eflags = kvm_get_rflags(vcpu);
5217 kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
Izik Eidus37817f22008-03-24 23:14:53 +02005218 }
5219
Gleb Natapov64a7ec02009-03-30 16:03:29 +03005220 /* set back link to prev task only if NT bit is set in eflags;
5221 note that old_tss_sel is not used after this point */
5222 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
5223 old_tss_sel = 0xffff;
Izik Eidus37817f22008-03-24 23:14:53 +02005224
5225 if (nseg_desc.type & 8)
Gleb Natapovb237ac32009-03-30 16:03:24 +03005226 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
5227 old_tss_base, &nseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02005228 else
Gleb Natapovb237ac32009-03-30 16:03:24 +03005229 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
5230 old_tss_base, &nseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02005231
5232 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
Jan Kiszka91586a32009-10-05 13:07:21 +02005233 u32 eflags = kvm_get_rflags(vcpu);
5234 kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
Izik Eidus37817f22008-03-24 23:14:53 +02005235 }
5236
5237 if (reason != TASK_SWITCH_IRET) {
Izik Eidus3fe913e2008-04-28 18:23:52 +03005238 nseg_desc.type |= (1 << 1);
Izik Eidus37817f22008-03-24 23:14:53 +02005239 save_guest_segment_descriptor(vcpu, tss_selector,
5240 &nseg_desc);
5241 }
5242
Avi Kivity4d4ec082009-12-29 18:07:30 +02005243 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
Izik Eidus37817f22008-03-24 23:14:53 +02005244 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
5245 tr_seg.type = 11;
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005246 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
Izik Eidus37817f22008-03-24 23:14:53 +02005247out:
Izik Eidus37817f22008-03-24 23:14:53 +02005248 return ret;
5249}
5250EXPORT_SYMBOL_GPL(kvm_task_switch);
5251
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005252int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5253 struct kvm_sregs *sregs)
5254{
5255 int mmu_reset_needed = 0;
Gleb Natapov923c61b2009-05-11 13:35:48 +03005256 int pending_vec, max_bits;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005257 struct descriptor_table dt;
5258
5259 vcpu_load(vcpu);
5260
5261 dt.limit = sregs->idt.limit;
5262 dt.base = sregs->idt.base;
5263 kvm_x86_ops->set_idt(vcpu, &dt);
5264 dt.limit = sregs->gdt.limit;
5265 dt.base = sregs->gdt.base;
5266 kvm_x86_ops->set_gdt(vcpu, &dt);
5267
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005268 vcpu->arch.cr2 = sregs->cr2;
5269 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
Jan Kiszkadc7e7952009-07-01 20:52:03 +02005270 vcpu->arch.cr3 = sregs->cr3;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005271
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02005272 kvm_set_cr8(vcpu, sregs->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005273
Avi Kivityf6801df2010-01-21 15:31:50 +02005274 mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005275 kvm_x86_ops->set_efer(vcpu, sregs->efer);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005276 kvm_set_apic_base(vcpu, sregs->apic_base);
5277
Avi Kivity4d4ec082009-12-29 18:07:30 +02005278 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005279 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
Paul Knowlesd7306162008-02-06 11:02:35 +00005280 vcpu->arch.cr0 = sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005281
Avi Kivityfc78f512009-12-07 12:16:48 +02005282 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005283 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
Marcelo Tosatti7c93be42009-10-26 16:48:33 -02005284 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005285 load_pdptrs(vcpu, vcpu->arch.cr3);
Marcelo Tosatti7c93be42009-10-26 16:48:33 -02005286 mmu_reset_needed = 1;
5287 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005288
5289 if (mmu_reset_needed)
5290 kvm_mmu_reset_context(vcpu);
5291
Gleb Natapov923c61b2009-05-11 13:35:48 +03005292 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5293 pending_vec = find_first_bit(
5294 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
5295 if (pending_vec < max_bits) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03005296 kvm_queue_interrupt(vcpu, pending_vec, false);
Gleb Natapov923c61b2009-05-11 13:35:48 +03005297 pr_debug("Set back pending irq %d\n", pending_vec);
5298 if (irqchip_in_kernel(vcpu->kvm))
5299 kvm_pic_clear_isr_ack(vcpu->kvm);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005300 }
5301
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005302 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5303 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5304 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5305 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5306 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5307 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005308
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005309 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5310 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005311
Mikhail Ershov5f0269f2009-08-03 14:58:25 +03005312 update_cr8_intercept(vcpu);
5313
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005314 /* Older userspace won't unhalt the vcpu on reset. */
Gleb Natapovc5af89b2009-06-09 15:56:26 +03005315 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005316 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
Avi Kivity3eeb3282010-01-21 15:31:48 +02005317 !is_protmode(vcpu))
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005318 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5319
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005320 vcpu_put(vcpu);
5321
5322 return 0;
5323}
5324
Jan Kiszkad0bfb942008-12-15 13:52:10 +01005325int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5326 struct kvm_guest_debug *dbg)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005327{
Jan Kiszka355be0b2009-10-03 00:31:21 +02005328 unsigned long rflags;
Jan Kiszkaae675ef2008-12-15 13:52:10 +01005329 int i, r;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005330
5331 vcpu_load(vcpu);
5332
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005333 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5334 r = -EBUSY;
5335 if (vcpu->arch.exception.pending)
5336 goto unlock_out;
5337 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5338 kvm_queue_exception(vcpu, DB_VECTOR);
5339 else
5340 kvm_queue_exception(vcpu, BP_VECTOR);
5341 }
5342
Jan Kiszka91586a32009-10-05 13:07:21 +02005343 /*
5344 * Read rflags as long as potentially injected trace flags are still
5345 * filtered out.
5346 */
5347 rflags = kvm_get_rflags(vcpu);
Jan Kiszka355be0b2009-10-03 00:31:21 +02005348
5349 vcpu->guest_debug = dbg->control;
5350 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5351 vcpu->guest_debug = 0;
5352
5353 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
Jan Kiszkaae675ef2008-12-15 13:52:10 +01005354 for (i = 0; i < KVM_NR_DB_REGS; ++i)
5355 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5356 vcpu->arch.switch_db_regs =
5357 (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5358 } else {
5359 for (i = 0; i < KVM_NR_DB_REGS; i++)
5360 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5361 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5362 }
5363
Jan Kiszka94fe45d2009-10-18 13:24:44 +02005364 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5365 vcpu->arch.singlestep_cs =
5366 get_segment_selector(vcpu, VCPU_SREG_CS);
5367 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
5368 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005369
Jan Kiszka91586a32009-10-05 13:07:21 +02005370 /*
5371 * Trigger an rflags update that will inject or remove the trace
5372 * flags.
5373 */
5374 kvm_set_rflags(vcpu, rflags);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01005375
Jan Kiszka355be0b2009-10-03 00:31:21 +02005376 kvm_x86_ops->set_guest_debug(vcpu, dbg);
5377
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005378 r = 0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005379
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005380unlock_out:
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005381 vcpu_put(vcpu);
5382
5383 return r;
5384}
5385
5386/*
Hollis Blanchardd0752062007-10-31 17:24:25 -05005387 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
5388 * we have asm/x86/processor.h
5389 */
5390struct fxsave {
5391 u16 cwd;
5392 u16 swd;
5393 u16 twd;
5394 u16 fop;
5395 u64 rip;
5396 u64 rdp;
5397 u32 mxcsr;
5398 u32 mxcsr_mask;
5399 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
5400#ifdef CONFIG_X86_64
5401 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
5402#else
5403 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
5404#endif
5405};
5406
Zhang Xiantao8b006792007-11-16 13:05:55 +08005407/*
5408 * Translate a guest virtual address to a guest physical address.
5409 */
5410int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5411 struct kvm_translation *tr)
5412{
5413 unsigned long vaddr = tr->linear_address;
5414 gpa_t gpa;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005415 int idx;
Zhang Xiantao8b006792007-11-16 13:05:55 +08005416
5417 vcpu_load(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005418 idx = srcu_read_lock(&vcpu->kvm->srcu);
Gleb Natapov1871c602010-02-10 14:21:32 +02005419 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005420 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Zhang Xiantao8b006792007-11-16 13:05:55 +08005421 tr->physical_address = gpa;
5422 tr->valid = gpa != UNMAPPED_GVA;
5423 tr->writeable = 1;
5424 tr->usermode = 0;
Zhang Xiantao8b006792007-11-16 13:05:55 +08005425 vcpu_put(vcpu);
5426
5427 return 0;
5428}
5429
Hollis Blanchardd0752062007-10-31 17:24:25 -05005430int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5431{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005432 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005433
5434 vcpu_load(vcpu);
5435
5436 memcpy(fpu->fpr, fxsave->st_space, 128);
5437 fpu->fcw = fxsave->cwd;
5438 fpu->fsw = fxsave->swd;
5439 fpu->ftwx = fxsave->twd;
5440 fpu->last_opcode = fxsave->fop;
5441 fpu->last_ip = fxsave->rip;
5442 fpu->last_dp = fxsave->rdp;
5443 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5444
5445 vcpu_put(vcpu);
5446
5447 return 0;
5448}
5449
5450int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5451{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005452 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005453
5454 vcpu_load(vcpu);
5455
5456 memcpy(fxsave->st_space, fpu->fpr, 128);
5457 fxsave->cwd = fpu->fcw;
5458 fxsave->swd = fpu->fsw;
5459 fxsave->twd = fpu->ftwx;
5460 fxsave->fop = fpu->last_opcode;
5461 fxsave->rip = fpu->last_ip;
5462 fxsave->rdp = fpu->last_dp;
5463 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5464
5465 vcpu_put(vcpu);
5466
5467 return 0;
5468}
5469
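/*
 * Initialize the guest FPU image: reset the FPU to its power-on state,
 * save that state into the guest image and restore the host image.
 */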
5470void fx_init(struct kvm_vcpu *vcpu)
5471{
5472 unsigned after_mxcsr_mask;
5473
Andrea Arcangelibc1a34f2008-05-01 18:43:33 +02005474 /*
5475 * Touch the FPU for the first time in a non-atomic context: if
5476 * this is the first FPU instruction, the exception handler
5477 * will fire before the instruction returns and it will have to
5478 * allocate RAM with GFP_KERNEL.
5479 */
5480 if (!used_math())
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005481 kvm_fx_save(&vcpu->arch.host_fx_image);
Andrea Arcangelibc1a34f2008-05-01 18:43:33 +02005482
Hollis Blanchardd0752062007-10-31 17:24:25 -05005483 /* Initialize guest FPU by resetting ours and saving into guest's */
5484 preempt_disable();
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005485 kvm_fx_save(&vcpu->arch.host_fx_image);
5486 kvm_fx_finit();
5487 kvm_fx_save(&vcpu->arch.guest_fx_image);
5488 kvm_fx_restore(&vcpu->arch.host_fx_image);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005489 preempt_enable();
5490
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005491 vcpu->arch.cr0 |= X86_CR0_ET;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005492 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005493 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
5494 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
Hollis Blanchardd0752062007-10-31 17:24:25 -05005495 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
5496}
5497EXPORT_SYMBOL_GPL(fx_init);
5498
5499void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5500{
Avi Kivity2608d7a2010-01-21 15:31:45 +02005501 if (vcpu->guest_fpu_loaded)
Hollis Blanchardd0752062007-10-31 17:24:25 -05005502 return;
5503
5504 vcpu->guest_fpu_loaded = 1;
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005505 kvm_fx_save(&vcpu->arch.host_fx_image);
5506 kvm_fx_restore(&vcpu->arch.guest_fx_image);
Avi Kivity0c048512010-01-21 15:31:52 +02005507 trace_kvm_fpu(1);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005508}
Hollis Blanchardd0752062007-10-31 17:24:25 -05005509
5510void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5511{
5512 if (!vcpu->guest_fpu_loaded)
5513 return;
5514
5515 vcpu->guest_fpu_loaded = 0;
Avi Kivityd6e88ae2008-07-10 16:53:33 +03005516 kvm_fx_save(&vcpu->arch.guest_fx_image);
5517 kvm_fx_restore(&vcpu->arch.host_fx_image);
Avi Kivityf096ed82007-11-18 13:54:33 +02005518 ++vcpu->stat.fpu_reload;
Avi Kivity02daab22009-12-30 12:40:26 +02005519 set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
Avi Kivity0c048512010-01-21 15:31:52 +02005520 trace_kvm_fpu(0);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005521}
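/*
 * Note (editor's assumption, not in the original source): the pair above
 * implements lazy FPU switching.  kvm_load_guest_fpu() swaps the host FPU
 * image for the guest's and kvm_put_guest_fpu() swaps it back; setting
 * KVM_REQ_DEACTIVATE_FPU is intended to defer the actual FPU deactivation
 * to the next guest entry rather than doing it here.
 */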
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005522
5523void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5524{
Joerg Roedel7f1ea202009-02-25 16:08:31 +01005525 if (vcpu->arch.time_page) {
5526 kvm_release_page_dirty(vcpu->arch.time_page);
5527 vcpu->arch.time_page = NULL;
5528 }
5529
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005530 kvm_x86_ops->vcpu_free(vcpu);
5531}
5532
5533struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5534 unsigned int id)
5535{
Avi Kivity26e52152007-11-20 15:30:24 +02005536 return kvm_x86_ops->vcpu_create(kvm, id);
5537}
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005538
Avi Kivity26e52152007-11-20 15:30:24 +02005539int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5540{
5541 int r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005542
5543 /* We do fxsave: this must be aligned. */
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005544 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005545
Sheng Yang0bed3b52008-10-09 16:01:54 +08005546 vcpu->arch.mtrr_state.have_fixed = 1;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005547 vcpu_load(vcpu);
5548 r = kvm_arch_vcpu_reset(vcpu);
5549 if (r == 0)
5550 r = kvm_mmu_setup(vcpu);
5551 vcpu_put(vcpu);
5552 if (r < 0)
5553 goto free_vcpu;
5554
Avi Kivity26e52152007-11-20 15:30:24 +02005555 return 0;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005556free_vcpu:
5557 kvm_x86_ops->vcpu_free(vcpu);
Avi Kivity26e52152007-11-20 15:30:24 +02005558 return r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005559}
5560
Hollis Blanchardd40ccc62007-11-19 14:04:43 -06005561void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005562{
5563 vcpu_load(vcpu);
5564 kvm_mmu_unload(vcpu);
5565 vcpu_put(vcpu);
5566
5567 kvm_x86_ops->vcpu_free(vcpu);
5568}
5569
5570int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5571{
Jan Kiszka448fa4a2008-09-26 09:30:48 +02005572 vcpu->arch.nmi_pending = false;
5573 vcpu->arch.nmi_injected = false;
5574
Jan Kiszka42dbaa52008-12-15 13:52:10 +01005575 vcpu->arch.switch_db_regs = 0;
5576 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5577 vcpu->arch.dr6 = DR6_FIXED_1;
5578 vcpu->arch.dr7 = DR7_FIXED_1;
5579
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005580 return kvm_x86_ops->vcpu_reset(vcpu);
5581}
5582
Alexander Graf10474ae2009-09-15 11:37:46 +02005583int kvm_arch_hardware_enable(void *garbage)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005584{
Zachary Amsden0cca7902009-09-29 11:38:35 -10005585 /*
 5586 * Since this may be called from a hotplug notification,
5587 * we can't get the CPU frequency directly.
5588 */
5589 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5590 int cpu = raw_smp_processor_id();
5591 per_cpu(cpu_tsc_khz, cpu) = 0;
5592 }
Avi Kivity18863bd2009-09-07 11:12:18 +03005593
5594 kvm_shared_msr_cpu_online();
5595
Alexander Graf10474ae2009-09-15 11:37:46 +02005596 return kvm_x86_ops->hardware_enable(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005597}
5598
5599void kvm_arch_hardware_disable(void *garbage)
5600{
5601 kvm_x86_ops->hardware_disable(garbage);
Avi Kivity3548bab2009-11-28 14:18:47 +02005602 drop_user_return_notifiers(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005603}
5604
5605int kvm_arch_hardware_setup(void)
5606{
5607 return kvm_x86_ops->hardware_setup();
5608}
5609
5610void kvm_arch_hardware_unsetup(void)
5611{
5612 kvm_x86_ops->hardware_unsetup();
5613}
5614
5615void kvm_arch_check_processor_compat(void *rtn)
5616{
5617 kvm_x86_ops->check_processor_compatibility(rtn);
5618}
5619
5620int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5621{
5622 struct page *page;
5623 struct kvm *kvm;
5624 int r;
5625
5626 BUG_ON(vcpu->kvm == NULL);
5627 kvm = vcpu->kvm;
5628
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005629 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
Gleb Natapovc5af89b2009-06-09 15:56:26 +03005630 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
Avi Kivitya4535292008-04-13 17:54:35 +03005631 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005632 else
Avi Kivitya4535292008-04-13 17:54:35 +03005633 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005634
5635 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5636 if (!page) {
5637 r = -ENOMEM;
5638 goto fail;
5639 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005640 vcpu->arch.pio_data = page_address(page);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005641
5642 r = kvm_mmu_create(vcpu);
5643 if (r < 0)
5644 goto fail_free_pio_data;
5645
5646 if (irqchip_in_kernel(kvm)) {
5647 r = kvm_create_lapic(vcpu);
5648 if (r < 0)
5649 goto fail_mmu_destroy;
5650 }
5651
Huang Ying890ca9a2009-05-11 16:48:15 +08005652 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
5653 GFP_KERNEL);
5654 if (!vcpu->arch.mce_banks) {
5655 r = -ENOMEM;
Wei Yongjun443c39b2010-01-22 14:21:29 +08005656 goto fail_free_lapic;
Huang Ying890ca9a2009-05-11 16:48:15 +08005657 }
5658 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5659
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005660 return 0;
Wei Yongjun443c39b2010-01-22 14:21:29 +08005661fail_free_lapic:
5662 kvm_free_lapic(vcpu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005663fail_mmu_destroy:
5664 kvm_mmu_destroy(vcpu);
5665fail_free_pio_data:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005666 free_page((unsigned long)vcpu->arch.pio_data);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005667fail:
5668 return r;
5669}
5670
5671void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5672{
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005673 int idx;
5674
Wei Yongjun36cb93f2010-01-22 14:18:47 +08005675 kfree(vcpu->arch.mce_banks);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005676 kvm_free_lapic(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005677 idx = srcu_read_lock(&vcpu->kvm->srcu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005678 kvm_mmu_destroy(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005679 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005680 free_page((unsigned long)vcpu->arch.pio_data);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005681}
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005682
5683struct kvm *kvm_arch_create_vm(void)
5684{
5685 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
5686
5687 if (!kvm)
5688 return ERR_PTR(-ENOMEM);
5689
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02005690 kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
5691 if (!kvm->arch.aliases) {
5692 kfree(kvm);
5693 return ERR_PTR(-ENOMEM);
5694 }
5695
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08005696 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +03005697 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005698
Sheng Yang5550af42008-10-15 20:15:06 +08005699 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5700 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5701
Marcelo Tosatti53f658b2008-12-11 20:45:05 +01005702 rdtscll(kvm->arch.vm_init_tsc);
5703
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005704 return kvm;
5705}
5706
5707static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
5708{
5709 vcpu_load(vcpu);
5710 kvm_mmu_unload(vcpu);
5711 vcpu_put(vcpu);
5712}
5713
5714static void kvm_free_vcpus(struct kvm *kvm)
5715{
5716 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005717 struct kvm_vcpu *vcpu;
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005718
5719 /*
5720 * Unpin any mmu pages first.
5721 */
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005722 kvm_for_each_vcpu(i, vcpu, kvm)
5723 kvm_unload_vcpu_mmu(vcpu);
5724 kvm_for_each_vcpu(i, vcpu, kvm)
5725 kvm_arch_vcpu_free(vcpu);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005726
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005727 mutex_lock(&kvm->lock);
5728 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
5729 kvm->vcpus[i] = NULL;
5730
5731 atomic_set(&kvm->online_vcpus, 0);
5732 mutex_unlock(&kvm->lock);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005733}
5734
Sheng Yangad8ba2c2009-01-06 10:03:02 +08005735void kvm_arch_sync_events(struct kvm *kvm)
5736{
Sheng Yangba4cef32009-01-06 10:03:03 +08005737 kvm_free_all_assigned_devices(kvm);
Sheng Yangad8ba2c2009-01-06 10:03:02 +08005738}
5739
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005740void kvm_arch_destroy_vm(struct kvm *kvm)
5741{
Sheng Yang6eb55812008-10-31 12:37:41 +08005742 kvm_iommu_unmap_guest(kvm);
Sheng Yang78376992008-01-28 05:10:22 +08005743 kvm_free_pit(kvm);
Zhang Xiantaod7deeeb2007-12-14 10:17:34 +08005744 kfree(kvm->arch.vpic);
5745 kfree(kvm->arch.vioapic);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005746 kvm_free_vcpus(kvm);
5747 kvm_free_physmem(kvm);
Avi Kivity3d458302008-03-25 11:26:13 +02005748 if (kvm->arch.apic_access_page)
5749 put_page(kvm->arch.apic_access_page);
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005750 if (kvm->arch.ept_identity_pagetable)
5751 put_page(kvm->arch.ept_identity_pagetable);
Marcelo Tosatti64749202010-01-19 12:45:23 -02005752 cleanup_srcu_struct(&kvm->srcu);
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02005753 kfree(kvm->arch.aliases);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005754 kfree(kvm);
5755}
Zhang Xiantao0de10342007-11-20 16:25:04 +08005756
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005757int kvm_arch_prepare_memory_region(struct kvm *kvm,
5758 struct kvm_memory_slot *memslot,
Zhang Xiantao0de10342007-11-20 16:25:04 +08005759 struct kvm_memory_slot old,
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005760 struct kvm_userspace_memory_region *mem,
Zhang Xiantao0de10342007-11-20 16:25:04 +08005761 int user_alloc)
5762{
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005763 int npages = memslot->npages;
Zhang Xiantao0de10342007-11-20 16:25:04 +08005764
 5765 /* To keep backward compatibility with older userspace,
 5766 * x86 needs to handle the !user_alloc case.
5767 */
5768 if (!user_alloc) {
5769 if (npages && !old.rmap) {
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005770 unsigned long userspace_addr;
5771
Izik Eidus72dc67a2008-02-10 18:04:15 +02005772 down_write(&current->mm->mmap_sem);
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005773 userspace_addr = do_mmap(NULL, 0,
5774 npages * PAGE_SIZE,
5775 PROT_READ | PROT_WRITE,
Avi Kivityacee3c02008-08-26 17:22:47 +03005776 MAP_PRIVATE | MAP_ANONYMOUS,
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005777 0);
Izik Eidus72dc67a2008-02-10 18:04:15 +02005778 up_write(&current->mm->mmap_sem);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005779
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005780 if (IS_ERR((void *)userspace_addr))
5781 return PTR_ERR((void *)userspace_addr);
5782
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005783 memslot->userspace_addr = userspace_addr;
Zhang Xiantao0de10342007-11-20 16:25:04 +08005784 }
5785 }
5786
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005787
5788 return 0;
5789}
5790
5791void kvm_arch_commit_memory_region(struct kvm *kvm,
5792 struct kvm_userspace_memory_region *mem,
5793 struct kvm_memory_slot old,
5794 int user_alloc)
5795{
5796
5797 int npages = mem->memory_size >> PAGE_SHIFT;
5798
5799 if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
5800 int ret;
5801
5802 down_write(&current->mm->mmap_sem);
5803 ret = do_munmap(current->mm, old.userspace_addr,
5804 old.npages * PAGE_SIZE);
5805 up_write(&current->mm->mmap_sem);
5806 if (ret < 0)
5807 printk(KERN_WARNING
5808 "kvm_vm_ioctl_set_memory_region: "
5809 "failed to munmap memory\n");
5810 }
5811
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03005812 spin_lock(&kvm->mmu_lock);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08005813 if (!kvm->arch.n_requested_mmu_pages) {
Zhang Xiantao0de10342007-11-20 16:25:04 +08005814 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
5815 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
5816 }
5817
5818 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03005819 spin_unlock(&kvm->mmu_lock);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005820}
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005821
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03005822void kvm_arch_flush_shadow(struct kvm *kvm)
5823{
5824 kvm_mmu_zap_all(kvm);
Marcelo Tosatti8986ecc2009-05-12 18:55:45 -03005825 kvm_reload_remote_mmus(kvm);
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03005826}
5827
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005828int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
5829{
Avi Kivitya4535292008-04-13 17:54:35 +03005830 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
Gleb Natapova1b37102009-07-09 15:33:52 +03005831 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
5832 || vcpu->arch.nmi_pending ||
5833 (kvm_arch_interrupt_allowed(vcpu) &&
5834 kvm_cpu_has_interrupt(vcpu));
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005835}
Zhang Xiantao57361992007-12-17 14:21:40 +08005836
Zhang Xiantao57361992007-12-17 14:21:40 +08005837void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
5838{
Marcelo Tosatti32f88402009-05-07 17:55:12 -03005839 int me;
5840 int cpu = vcpu->cpu;
Zhang Xiantao57361992007-12-17 14:21:40 +08005841
5842 if (waitqueue_active(&vcpu->wq)) {
5843 wake_up_interruptible(&vcpu->wq);
5844 ++vcpu->stat.halt_wakeup;
5845 }
Marcelo Tosatti32f88402009-05-07 17:55:12 -03005846
5847 me = get_cpu();
5848 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
5849 if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
5850 smp_send_reschedule(cpu);
Marcelo Tosattie9571ed2008-04-11 15:01:22 -03005851 put_cpu();
Zhang Xiantao57361992007-12-17 14:21:40 +08005852}
Gleb Natapov78646122009-03-23 12:12:11 +02005853
5854int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
5855{
5856 return kvm_x86_ops->interrupt_allowed(vcpu);
5857}
Marcelo Tosatti229456f2009-06-17 09:22:14 -03005858
Jan Kiszka94fe45d2009-10-18 13:24:44 +02005859unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
5860{
5861 unsigned long rflags;
5862
5863 rflags = kvm_x86_ops->get_rflags(vcpu);
5864 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5865 rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
5866 return rflags;
5867}
5868EXPORT_SYMBOL_GPL(kvm_get_rflags);
5869
5870void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
5871{
5872 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
5873 vcpu->arch.singlestep_cs ==
5874 get_segment_selector(vcpu, VCPU_SREG_CS) &&
5875 vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
5876 rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
5877 kvm_x86_ops->set_rflags(vcpu, rflags);
5878}
5879EXPORT_SYMBOL_GPL(kvm_set_rflags);
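/*
 * Illustrative sketch, not part of the original file: the TF/RF handling
 * above is what keeps single-stepping requested through KVM_SET_GUEST_DEBUG
 * hidden from the guest.  A hypothetical userspace user (vcpu_fd assumed to
 * exist) would enable it with:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */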
5880
Marcelo Tosatti229456f2009-06-17 09:22:14 -03005881EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
5882EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
5883EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
5884EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
5885EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
Joerg Roedel0ac406d2009-10-09 16:08:27 +02005886EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02005887EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
Joerg Roedel17897f32009-10-09 16:08:29 +02005888EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
Joerg Roedel236649d2009-10-09 16:08:30 +02005889EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
Joerg Roedelec1ff792009-10-09 16:08:31 +02005890EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
Joerg Roedel532a46b2009-10-09 16:08:32 +02005891EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);