/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <trace/events/kvm.h>
#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

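/*
 * Shared ("user return") MSRs are host MSRs that may be loaded with guest
 * values while a vcpu runs.  Rather than restoring the host values on every
 * exit to the host kernel, restoration is deferred via a user-return
 * notifier until the CPU actually returns to userspace.
 */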
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/* shared_msrs_global is only read here, and nobody should be
	 * modifying it at this time, so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure the update to shared_msrs_global is visible before use */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

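/*
 * Return the linear base address of the segment named by @selector by
 * reading its descriptor from the GDT (or from the LDT when bit 2 of the
 * selector is set).  On x86-64, system descriptors carry an extra base3
 * word that is folded into the upper 32 bits.
 */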
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	kvm_get_gdt(&gdt);
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector = kvm_read_ldt();

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

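/*
 * Queue an exception for injection, merging it with any exception that is
 * already pending: a contributory fault on top of another contributory
 * fault, or any non-benign fault while a #PF is pending, is promoted to
 * #DF; a fault while #DF is pending requests a triple-fault shutdown; in
 * all other cases the new exception simply replaces the pending one.
 */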
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code)
{
	u32 prev_nr;
	int class1, class2;

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		return;
	}

	/* an exception is already pending; see how the new one combines */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace the previous exception with the new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * Load the pae pdptrs. Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

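/*
 * Return true if the PDPTEs cached in the vcpu no longer match the ones in
 * guest memory (or cannot be re-read), so they need to be reloaded.
 */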
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;

			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific; those are put at the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	2
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

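/*
 * Precompute the conversion factors for the guest's pvclock: scale the TSC
 * rate into the nanosecond range with a binary shift, then derive the
 * 32-bit fixed-point multiplier (via div_frac()) that turns scaled TSC
 * ticks into nanoseconds.
 */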
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if ((!vcpu->time_page))
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
		(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

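/*
 * Variable-range MTRRs occupy MSR pairs starting at 0x200: even MSRs hold
 * the PHYSBASE register and odd MSRs the PHYSMASK register of range
 * (msr - 0x200) / 2.  Fixed-range and default-type MTRRs are matched by
 * name below.
 */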
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

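/*
 * Machine-check banks are exposed as four consecutive MSRs per bank
 * starting at MSR_IA32_MC0_CTL; a bank's CTL register ((offset & 3) == 0)
 * only accepts all-zeros or all-ones.
 */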
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL */
			if ((offset & 0x3) == 0 &&
			    data != 0 && data != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

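/*
 * Called when the guest writes the MSR configured in
 * kvm->arch.xen_hvm_config: copy one page of the userspace-provided
 * hypercall blob into guest memory.  The low bits of the written value
 * select the blob page, the page-aligned part gives the destination GPA.
 */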
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto out;
	r = -EFAULT;
	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
		goto out_free;
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				"0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_AMD64_NB_CFG:
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return set_msr_mce(vcpu, msr, data);

	/* Performance counters are not protected by a CPUID bit,
	 * so we should check all of them in the generic path for the sake of
	 * cross vendor migration.
	 * Writing a zero into the event select MSRs disables them,
	 * which we perfectly emulate ;-). Any other value should be at least
	 * reported, some guests depend on them.
	 */
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		if (data != 0)
			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
				"0x%x data 0x%llx\n", msr, data);
		break;
	/* at least RHEL 4 unconditionally writes to the perfctr registers,
	 * so we ignore writes to make it happy.
	 */
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
			"0x%x data 0x%llx\n", msr, data);
		break;
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
				msr, data);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
				msr, data);
			break;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_K8_SYSCFG:
	case MSR_K7_HWCR:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
	case MSR_K8_INT_PENDING_MSG:
	case MSR_AMD64_NB_CFG:
	case MSR_FAM10H_MMIO_CONF_BASE:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_read(vcpu, msr, pdata);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return get_msr_mce(vcpu, msr, pdata);
	default:
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
			data = 0;
		}
		break;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

Zhang Xiantao018d00d2007-11-15 23:07:47 +08001372int kvm_dev_ioctl_check_extension(long ext)
1373{
1374 int r;
1375
1376 switch (ext) {
1377 case KVM_CAP_IRQCHIP:
1378 case KVM_CAP_HLT:
1379 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001380 case KVM_CAP_SET_TSS_ADDR:
Dan Kenigsberg07716712007-11-21 17:10:04 +02001381 case KVM_CAP_EXT_CPUID:
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001382 case KVM_CAP_CLOCKSOURCE:
Sheng Yang78376992008-01-28 05:10:22 +08001383 case KVM_CAP_PIT:
Marcelo Tosattia28e4f52008-02-22 12:21:36 -05001384 case KVM_CAP_NOP_IO_DELAY:
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001385 case KVM_CAP_MP_STATE:
Avi Kivityed848622008-07-29 11:30:57 +03001386 case KVM_CAP_SYNC_MMU:
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02001387 case KVM_CAP_REINJECT_CONTROL:
Gleb Natapov49256632009-02-04 17:28:14 +02001388 case KVM_CAP_IRQ_INJECT_STATUS:
Sheng Yange56d5322009-03-12 21:45:39 +08001389 case KVM_CAP_ASSIGN_DEV_IRQ:
Gregory Haskins721eecb2009-05-20 10:30:49 -04001390 case KVM_CAP_IRQFD:
Gregory Haskinsd34e6b12009-07-07 17:08:49 -04001391 case KVM_CAP_IOEVENTFD:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02001392 case KVM_CAP_PIT2:
Beth Kone9f42752009-07-07 11:50:38 -04001393 case KVM_CAP_PIT_STATE2:
Sheng Yangb927a3c2009-07-21 10:42:48 +08001394 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
Ed Swierkffde22a2009-10-15 15:21:43 -07001395 case KVM_CAP_XEN_HVM:
Glauber Costaafbcf7a2009-10-16 15:28:36 -04001396 case KVM_CAP_ADJUST_CLOCK:
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001397 case KVM_CAP_VCPU_EVENTS:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001398 r = 1;
1399 break;
Laurent Vivier542472b2008-05-30 16:05:55 +02001400 case KVM_CAP_COALESCED_MMIO:
1401 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1402 break;
Avi Kivity774ead32007-12-26 13:57:04 +02001403 case KVM_CAP_VAPIC:
1404 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1405 break;
Avi Kivityf7252302008-02-20 11:53:16 +02001406 case KVM_CAP_NR_VCPUS:
1407 r = KVM_MAX_VCPUS;
1408 break;
Avi Kivitya988b912008-02-20 11:59:20 +02001409 case KVM_CAP_NR_MEMSLOTS:
1410 r = KVM_MEMORY_SLOTS;
1411 break;
Marcelo Tosattia68a6a72009-10-01 19:28:39 -03001412 case KVM_CAP_PV_MMU: /* obsolete */
1413 r = 0;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05001414 break;
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001415 case KVM_CAP_IOMMU:
Joerg Roedel19de40a2008-12-03 14:43:34 +01001416 r = iommu_found();
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001417 break;
Huang Ying890ca9a2009-05-11 16:48:15 +08001418 case KVM_CAP_MCE:
1419 r = KVM_MAX_MCE_BANKS;
1420 break;
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001421 default:
1422 r = 0;
1423 break;
1424 }
1425 return r;
1426
1427}
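
/*
 * Illustrative userspace sketch (not part of this file): the values returned
 * above are queried with KVM_CHECK_EXTENSION on the /dev/kvm fd; 0 means the
 * capability is absent, positive values are capability specific (a flag, a
 * maximum count, a page offset, ...).
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *		use_in_kernel_irqchip = 1;
 */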
1428
Carsten Otte043405e2007-10-10 17:16:19 +02001429long kvm_arch_dev_ioctl(struct file *filp,
1430 unsigned int ioctl, unsigned long arg)
1431{
1432 void __user *argp = (void __user *)arg;
1433 long r;
1434
1435 switch (ioctl) {
1436 case KVM_GET_MSR_INDEX_LIST: {
1437 struct kvm_msr_list __user *user_msr_list = argp;
1438 struct kvm_msr_list msr_list;
1439 unsigned n;
1440
1441 r = -EFAULT;
1442 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1443 goto out;
1444 n = msr_list.nmsrs;
1445 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1446 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1447 goto out;
1448 r = -E2BIG;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001449 if (n < msr_list.nmsrs)
Carsten Otte043405e2007-10-10 17:16:19 +02001450 goto out;
1451 r = -EFAULT;
1452 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1453 num_msrs_to_save * sizeof(u32)))
1454 goto out;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001455 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
Carsten Otte043405e2007-10-10 17:16:19 +02001456 &emulated_msrs,
1457 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1458 goto out;
1459 r = 0;
1460 break;
1461 }
Avi Kivity674eea02008-02-11 18:37:23 +02001462 case KVM_GET_SUPPORTED_CPUID: {
1463 struct kvm_cpuid2 __user *cpuid_arg = argp;
1464 struct kvm_cpuid2 cpuid;
1465
1466 r = -EFAULT;
1467 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1468 goto out;
1469 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
Amit Shah19355472009-01-14 16:56:00 +00001470 cpuid_arg->entries);
Avi Kivity674eea02008-02-11 18:37:23 +02001471 if (r)
1472 goto out;
1473
1474 r = -EFAULT;
1475 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1476 goto out;
1477 r = 0;
1478 break;
1479 }
Huang Ying890ca9a2009-05-11 16:48:15 +08001480 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1481 u64 mce_cap;
1482
1483 mce_cap = KVM_MCE_CAP_SUPPORTED;
1484 r = -EFAULT;
1485 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1486 goto out;
1487 r = 0;
1488 break;
1489 }
Carsten Otte043405e2007-10-10 17:16:19 +02001490 default:
1491 r = -EINVAL;
1492 }
1493out:
1494 return r;
1495}
1496
Carsten Otte313a3dc2007-10-11 19:16:52 +02001497void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1498{
1499 kvm_x86_ops->vcpu_load(vcpu, cpu);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10001500 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1501 unsigned long khz = cpufreq_quick_get(cpu);
1502 if (!khz)
1503 khz = tsc_khz;
1504 per_cpu(cpu_tsc_khz, cpu) = khz;
1505 }
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001506 kvm_request_guest_time_update(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001507}
1508
1509void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1510{
1511 kvm_x86_ops->vcpu_put(vcpu);
Amit Shah9327fd12007-11-15 18:38:46 +02001512 kvm_put_guest_fpu(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001513}
1514
Dan Kenigsberg07716712007-11-21 17:10:04 +02001515static int is_efer_nx(void)
Carsten Otte313a3dc2007-10-11 19:16:52 +02001516{
Avi Kivitye286e862009-05-03 18:50:55 +03001517 unsigned long long efer = 0;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001518
Avi Kivitye286e862009-05-03 18:50:55 +03001519 rdmsrl_safe(MSR_EFER, &efer);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001520 return efer & EFER_NX;
1521}
1522
1523static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1524{
1525 int i;
1526 struct kvm_cpuid_entry2 *e, *entry;
1527
Carsten Otte313a3dc2007-10-11 19:16:52 +02001528 entry = NULL;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001529 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1530 e = &vcpu->arch.cpuid_entries[i];
Carsten Otte313a3dc2007-10-11 19:16:52 +02001531 if (e->function == 0x80000001) {
1532 entry = e;
1533 break;
1534 }
1535 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02001536 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
Carsten Otte313a3dc2007-10-11 19:16:52 +02001537 entry->edx &= ~(1 << 20);
1538 printk(KERN_INFO "kvm: guest NX capability removed\n");
1539 }
1540}
1541
Dan Kenigsberg07716712007-11-21 17:10:04 +02001542/* when an old userspace process (legacy KVM_SET_CPUID) fills the newer kernel module's cpuid entries */
Carsten Otte313a3dc2007-10-11 19:16:52 +02001543static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1544 struct kvm_cpuid *cpuid,
1545 struct kvm_cpuid_entry __user *entries)
1546{
Dan Kenigsberg07716712007-11-21 17:10:04 +02001547 int r, i;
1548 struct kvm_cpuid_entry *cpuid_entries;
1549
1550 r = -E2BIG;
1551 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1552 goto out;
1553 r = -ENOMEM;
1554 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1555 if (!cpuid_entries)
1556 goto out;
1557 r = -EFAULT;
1558 if (copy_from_user(cpuid_entries, entries,
1559 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1560 goto out_free;
1561 for (i = 0; i < cpuid->nent; i++) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001562 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1563 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1564 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1565 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1566 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1567 vcpu->arch.cpuid_entries[i].index = 0;
1568 vcpu->arch.cpuid_entries[i].flags = 0;
1569 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1570 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1571 vcpu->arch.cpuid_entries[i].padding[2] = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001572 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001573 vcpu->arch.cpuid_nent = cpuid->nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001574 cpuid_fix_nx_cap(vcpu);
1575 r = 0;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001576 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001577 kvm_x86_ops->cpuid_update(vcpu);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001578
1579out_free:
1580 vfree(cpuid_entries);
1581out:
1582 return r;
1583}
1584
1585static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001586 struct kvm_cpuid2 *cpuid,
1587 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001588{
Carsten Otte313a3dc2007-10-11 19:16:52 +02001589 int r;
1590
1591 r = -E2BIG;
1592 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1593 goto out;
1594 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001595 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
Dan Kenigsberg07716712007-11-21 17:10:04 +02001596 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02001597 goto out;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001598 vcpu->arch.cpuid_nent = cpuid->nent;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001599 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001600 kvm_x86_ops->cpuid_update(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001601 return 0;
1602
1603out:
1604 return r;
1605}
1606
Dan Kenigsberg07716712007-11-21 17:10:04 +02001607static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001608 struct kvm_cpuid2 *cpuid,
1609 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001610{
1611 int r;
1612
1613 r = -E2BIG;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001614 if (cpuid->nent < vcpu->arch.cpuid_nent)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001615 goto out;
1616 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001617 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00001618 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02001619 goto out;
1620 return 0;
1621
1622out:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001623 cpuid->nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001624 return r;
1625}
1626
Dan Kenigsberg07716712007-11-21 17:10:04 +02001627static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
Amit Shah19355472009-01-14 16:56:00 +00001628 u32 index)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001629{
1630 entry->function = function;
1631 entry->index = index;
1632 cpuid_count(entry->function, entry->index,
Amit Shah19355472009-01-14 16:56:00 +00001633 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001634 entry->flags = 0;
1635}
1636
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001637#define F(x) bit(X86_FEATURE_##x)
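/*
 * F(FOO) expands to bit(X86_FEATURE_FOO), i.e. a mask with only that
 * feature's bit set within its 32-bit cpuid word, so the
 * kvm_supported_word*_x86_features constants below are simply OR-ed
 * per-feature masks.
 */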
1638
Dan Kenigsberg07716712007-11-21 17:10:04 +02001639static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1640 u32 index, int *nent, int maxnent)
1641{
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001642 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
Joerg Roedel344f4142009-07-27 16:30:48 +02001643 unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001644#ifdef CONFIG_X86_64
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001645 unsigned f_lm = F(LM);
1646#else
1647 unsigned f_lm = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001648#endif
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001649
1650 /* cpuid 1.edx */
1651 const u32 kvm_supported_word0_x86_features =
1652 F(FPU) | F(VME) | F(DE) | F(PSE) |
1653 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1654 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1655 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1656 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1657 0 /* Reserved, DS, ACPI */ | F(MMX) |
1658 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1659 0 /* HTT, TM, Reserved, PBE */;
1660 /* cpuid 0x80000001.edx */
1661 const u32 kvm_supported_word1_x86_features =
1662 F(FPU) | F(VME) | F(DE) | F(PSE) |
1663 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1664 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1665 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1666 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1667 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
Joerg Roedel344f4142009-07-27 16:30:48 +02001668 F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001669 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1670 /* cpuid 1.ecx */
1671 const u32 kvm_supported_word4_x86_features =
Avi Kivityd149c732009-05-10 14:41:56 +03001672 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1673 0 /* DS-CPL, VMX, SMX, EST */ |
1674 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1675 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1676 0 /* Reserved, DCA */ | F(XMM4_1) |
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001677 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
Avi Kivityd149c732009-05-10 14:41:56 +03001678 0 /* Reserved, XSAVE, OSXSAVE */;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001679 /* cpuid 0x80000001.ecx */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001680 const u32 kvm_supported_word6_x86_features =
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001681 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1682 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1683 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1684 0 /* SKINIT */ | 0 /* WDT */;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001685
Amit Shah19355472009-01-14 16:56:00 +00001686 /* all calls to cpuid_count() should be made on the same cpu */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001687 get_cpu();
1688 do_cpuid_1_ent(entry, function, index);
1689 ++*nent;
1690
1691 switch (function) {
1692 case 0:
1693 entry->eax = min(entry->eax, (u32)0xb);
1694 break;
1695 case 1:
1696 entry->edx &= kvm_supported_word0_x86_features;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001697 entry->ecx &= kvm_supported_word4_x86_features;
Gleb Natapov0d1de2d2009-07-12 16:10:55 +03001698 /* we support x2apic emulation even if host does not support
1699 * it since we emulate x2apic in software */
1700 entry->ecx |= F(X2APIC);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001701 break;
1702 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1703 * may return different values. This forces us to get_cpu() before
1704 * issuing the first command, and also to emulate this annoying behavior
1705 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
1706 case 2: {
1707 int t, times = entry->eax & 0xff;
1708
1709 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08001710 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001711 for (t = 1; t < times && *nent < maxnent; ++t) {
1712 do_cpuid_1_ent(&entry[t], function, 0);
1713 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
1714 ++*nent;
1715 }
1716 break;
1717 }
1718 /* functions 4 and 0xb have an additional index. */
1719 case 4: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08001720 int i, cache_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001721
1722 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1723 /* read more entries until cache_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08001724 for (i = 1; *nent < maxnent; ++i) {
1725 cache_type = entry[i - 1].eax & 0x1f;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001726 if (!cache_type)
1727 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08001728 do_cpuid_1_ent(&entry[i], function, i);
1729 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02001730 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1731 ++*nent;
1732 }
1733 break;
1734 }
1735 case 0xb: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08001736 int i, level_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001737
1738 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1739 /* read more entries until level_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08001740 for (i = 1; *nent < maxnent; ++i) {
Nitin A Kamble0853d2c2008-11-05 15:37:36 -08001741 level_type = entry[i - 1].ecx & 0xff00;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001742 if (!level_type)
1743 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08001744 do_cpuid_1_ent(&entry[i], function, i);
1745 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02001746 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1747 ++*nent;
1748 }
1749 break;
1750 }
1751 case 0x80000000:
1752 entry->eax = min(entry->eax, 0x8000001a);
1753 break;
1754 case 0x80000001:
1755 entry->edx &= kvm_supported_word1_x86_features;
1756 entry->ecx &= kvm_supported_word6_x86_features;
1757 break;
1758 }
1759 put_cpu();
1760}
1761
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001762#undef F
1763
Avi Kivity674eea02008-02-11 18:37:23 +02001764static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
Amit Shah19355472009-01-14 16:56:00 +00001765 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001766{
1767 struct kvm_cpuid_entry2 *cpuid_entries;
1768 int limit, nent = 0, r = -E2BIG;
1769 u32 func;
1770
1771 if (cpuid->nent < 1)
1772 goto out;
Avi Kivity6a544352009-10-04 16:45:13 +02001773 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1774 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001775 r = -ENOMEM;
1776 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1777 if (!cpuid_entries)
1778 goto out;
1779
1780 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
1781 limit = cpuid_entries[0].eax;
1782 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1783 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00001784 &nent, cpuid->nent);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001785 r = -E2BIG;
1786 if (nent >= cpuid->nent)
1787 goto out_free;
1788
1789 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
1790 limit = cpuid_entries[nent - 1].eax;
1791 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1792 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00001793 &nent, cpuid->nent);
Mark McLoughlincb007642009-05-12 12:36:44 +01001794 r = -E2BIG;
1795 if (nent >= cpuid->nent)
1796 goto out_free;
1797
Dan Kenigsberg07716712007-11-21 17:10:04 +02001798 r = -EFAULT;
1799 if (copy_to_user(entries, cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00001800 nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02001801 goto out_free;
1802 cpuid->nent = nent;
1803 r = 0;
1804
1805out_free:
1806 vfree(cpuid_entries);
1807out:
1808 return r;
1809}
1810
Carsten Otte313a3dc2007-10-11 19:16:52 +02001811static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
1812 struct kvm_lapic_state *s)
1813{
1814 vcpu_load(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001815 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001816 vcpu_put(vcpu);
1817
1818 return 0;
1819}
1820
1821static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1822 struct kvm_lapic_state *s)
1823{
1824 vcpu_load(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001825 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001826 kvm_apic_post_state_restore(vcpu);
Gleb Natapovcb142eb2009-08-09 15:17:40 +03001827 update_cr8_intercept(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001828 vcpu_put(vcpu);
1829
1830 return 0;
1831}
1832
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08001833static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
1834 struct kvm_interrupt *irq)
1835{
1836 if (irq->irq < 0 || irq->irq >= 256)
1837 return -EINVAL;
1838 if (irqchip_in_kernel(vcpu->kvm))
1839 return -ENXIO;
1840 vcpu_load(vcpu);
1841
Gleb Natapov66fd3f72009-05-11 13:35:50 +03001842 kvm_queue_interrupt(vcpu, irq->irq, false);
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08001843
1844 vcpu_put(vcpu);
1845
1846 return 0;
1847}
1848
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02001849static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
1850{
1851 vcpu_load(vcpu);
1852 kvm_inject_nmi(vcpu);
1853 vcpu_put(vcpu);
1854
1855 return 0;
1856}
1857
Avi Kivityb209749f2007-10-22 16:50:39 +02001858static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
1859 struct kvm_tpr_access_ctl *tac)
1860{
1861 if (tac->flags)
1862 return -EINVAL;
1863 vcpu->arch.tpr_access_reporting = !!tac->enabled;
1864 return 0;
1865}
1866
Huang Ying890ca9a2009-05-11 16:48:15 +08001867static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1868 u64 mcg_cap)
1869{
1870 int r;
1871 unsigned bank_num = mcg_cap & 0xff, bank;
1872
1873 r = -EINVAL;
Jan Kiszkaa9e38c3e2009-10-23 09:37:00 +02001874 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
Huang Ying890ca9a2009-05-11 16:48:15 +08001875 goto out;
1876 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
1877 goto out;
1878 r = 0;
1879 vcpu->arch.mcg_cap = mcg_cap;
1880 /* Init IA32_MCG_CTL to all 1s */
1881 if (mcg_cap & MCG_CTL_P)
1882 vcpu->arch.mcg_ctl = ~(u64)0;
1883 /* Init IA32_MCi_CTL to all 1s */
1884 for (bank = 0; bank < bank_num; bank++)
1885 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1886out:
1887 return r;
1888}
1889
1890static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
1891 struct kvm_x86_mce *mce)
1892{
1893 u64 mcg_cap = vcpu->arch.mcg_cap;
1894 unsigned bank_num = mcg_cap & 0xff;
1895 u64 *banks = vcpu->arch.mce_banks;
1896
1897 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
1898 return -EINVAL;
1899 /*
1900 * if IA32_MCG_CTL is not all 1s, the uncorrected error
1901 * reporting is disabled
1902 */
1903 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
1904 vcpu->arch.mcg_ctl != ~(u64)0)
1905 return 0;
1906 banks += 4 * mce->bank;
1907 /*
1908 * if IA32_MCi_CTL is not all 1s, the uncorrected error
1909 * reporting is disabled for the bank
1910 */
1911 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
1912 return 0;
1913 if (mce->status & MCI_STATUS_UC) {
1914 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
Avi Kivityfc78f512009-12-07 12:16:48 +02001915 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
Huang Ying890ca9a2009-05-11 16:48:15 +08001916 printk(KERN_DEBUG "kvm: set_mce: "
1917 "injects mce exception while "
1918 "previous one is in progress!\n");
1919 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1920 return 0;
1921 }
1922 if (banks[1] & MCI_STATUS_VAL)
1923 mce->status |= MCI_STATUS_OVER;
1924 banks[2] = mce->addr;
1925 banks[3] = mce->misc;
1926 vcpu->arch.mcg_status = mce->mcg_status;
1927 banks[1] = mce->status;
1928 kvm_queue_exception(vcpu, MC_VECTOR);
1929 } else if (!(banks[1] & MCI_STATUS_VAL)
1930 || !(banks[1] & MCI_STATUS_UC)) {
1931 if (banks[1] & MCI_STATUS_VAL)
1932 mce->status |= MCI_STATUS_OVER;
1933 banks[2] = mce->addr;
1934 banks[3] = mce->misc;
1935 banks[1] = mce->status;
1936 } else
1937 banks[1] |= MCI_STATUS_OVER;
1938 return 0;
1939}
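
/*
 * Illustrative userspace sketch (not part of this file): a VMM would first
 * negotiate MCE support and could then inject an uncorrected error into
 * bank 0 roughly as below (field values are made up for the example, and a
 * real VMM would trim mcg_cap before handing it back):
 *
 *	__u64 mcg_cap;
 *	ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap);
 *	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
 *
 *	struct kvm_x86_mce mce = {
 *		.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN,
 *		.bank = 0,
 *		.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
 *	};
 *	ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
 */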
1940
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001941static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
1942 struct kvm_vcpu_events *events)
1943{
1944 vcpu_load(vcpu);
1945
1946 events->exception.injected = vcpu->arch.exception.pending;
1947 events->exception.nr = vcpu->arch.exception.nr;
1948 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
1949 events->exception.error_code = vcpu->arch.exception.error_code;
1950
1951 events->interrupt.injected = vcpu->arch.interrupt.pending;
1952 events->interrupt.nr = vcpu->arch.interrupt.nr;
1953 events->interrupt.soft = vcpu->arch.interrupt.soft;
1954
1955 events->nmi.injected = vcpu->arch.nmi_injected;
1956 events->nmi.pending = vcpu->arch.nmi_pending;
1957 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
1958
1959 events->sipi_vector = vcpu->arch.sipi_vector;
1960
Jan Kiszkadab4b912009-12-06 18:24:15 +01001961 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
1962 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001963
1964 vcpu_put(vcpu);
1965}
1966
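/*
 * Restore event state from userspace.  Exception, interrupt and NMI
 * injection state is always taken over; nmi.pending and sipi_vector are
 * only honoured when the matching KVM_VCPUEVENT_VALID_* flag is set, so
 * userspace that leaves those flags clear does not clobber them.
 */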
1967static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
1968 struct kvm_vcpu_events *events)
1969{
Jan Kiszkadab4b912009-12-06 18:24:15 +01001970 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
1971 | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001972 return -EINVAL;
1973
1974 vcpu_load(vcpu);
1975
1976 vcpu->arch.exception.pending = events->exception.injected;
1977 vcpu->arch.exception.nr = events->exception.nr;
1978 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
1979 vcpu->arch.exception.error_code = events->exception.error_code;
1980
1981 vcpu->arch.interrupt.pending = events->interrupt.injected;
1982 vcpu->arch.interrupt.nr = events->interrupt.nr;
1983 vcpu->arch.interrupt.soft = events->interrupt.soft;
1984 if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
1985 kvm_pic_clear_isr_ack(vcpu->kvm);
1986
1987 vcpu->arch.nmi_injected = events->nmi.injected;
Jan Kiszkadab4b912009-12-06 18:24:15 +01001988 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
1989 vcpu->arch.nmi_pending = events->nmi.pending;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001990 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
1991
Jan Kiszkadab4b912009-12-06 18:24:15 +01001992 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
1993 vcpu->arch.sipi_vector = events->sipi_vector;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001994
1995 vcpu_put(vcpu);
1996
1997 return 0;
1998}
1999
Carsten Otte313a3dc2007-10-11 19:16:52 +02002000long kvm_arch_vcpu_ioctl(struct file *filp,
2001 unsigned int ioctl, unsigned long arg)
2002{
2003 struct kvm_vcpu *vcpu = filp->private_data;
2004 void __user *argp = (void __user *)arg;
2005 int r;
Dave Hansenb772ff32008-08-11 10:01:47 -07002006 struct kvm_lapic_state *lapic = NULL;
Carsten Otte313a3dc2007-10-11 19:16:52 +02002007
2008 switch (ioctl) {
2009 case KVM_GET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002010 r = -EINVAL;
2011 if (!vcpu->arch.apic)
2012 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002013 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002014
Dave Hansenb772ff32008-08-11 10:01:47 -07002015 r = -ENOMEM;
2016 if (!lapic)
2017 goto out;
2018 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002019 if (r)
2020 goto out;
2021 r = -EFAULT;
Dave Hansenb772ff32008-08-11 10:01:47 -07002022 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02002023 goto out;
2024 r = 0;
2025 break;
2026 }
2027 case KVM_SET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002028 r = -EINVAL;
2029 if (!vcpu->arch.apic)
2030 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002031 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2032 r = -ENOMEM;
2033 if (!lapic)
Carsten Otte313a3dc2007-10-11 19:16:52 +02002034 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002035 r = -EFAULT;
2036 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
2037 goto out;
2038 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002039 if (r)
2040 goto out;
2041 r = 0;
2042 break;
2043 }
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002044 case KVM_INTERRUPT: {
2045 struct kvm_interrupt irq;
2046
2047 r = -EFAULT;
2048 if (copy_from_user(&irq, argp, sizeof irq))
2049 goto out;
2050 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2051 if (r)
2052 goto out;
2053 r = 0;
2054 break;
2055 }
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002056 case KVM_NMI: {
2057 r = kvm_vcpu_ioctl_nmi(vcpu);
2058 if (r)
2059 goto out;
2060 r = 0;
2061 break;
2062 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002063 case KVM_SET_CPUID: {
2064 struct kvm_cpuid __user *cpuid_arg = argp;
2065 struct kvm_cpuid cpuid;
2066
2067 r = -EFAULT;
2068 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2069 goto out;
2070 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2071 if (r)
2072 goto out;
2073 break;
2074 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02002075 case KVM_SET_CPUID2: {
2076 struct kvm_cpuid2 __user *cpuid_arg = argp;
2077 struct kvm_cpuid2 cpuid;
2078
2079 r = -EFAULT;
2080 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2081 goto out;
2082 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002083 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002084 if (r)
2085 goto out;
2086 break;
2087 }
2088 case KVM_GET_CPUID2: {
2089 struct kvm_cpuid2 __user *cpuid_arg = argp;
2090 struct kvm_cpuid2 cpuid;
2091
2092 r = -EFAULT;
2093 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2094 goto out;
2095 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002096 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002097 if (r)
2098 goto out;
2099 r = -EFAULT;
2100 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2101 goto out;
2102 r = 0;
2103 break;
2104 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002105 case KVM_GET_MSRS:
2106 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2107 break;
2108 case KVM_SET_MSRS:
2109 r = msr_io(vcpu, argp, do_set_msr, 0);
2110 break;
Avi Kivityb209749f2007-10-22 16:50:39 +02002111 case KVM_TPR_ACCESS_REPORTING: {
2112 struct kvm_tpr_access_ctl tac;
2113
2114 r = -EFAULT;
2115 if (copy_from_user(&tac, argp, sizeof tac))
2116 goto out;
2117 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2118 if (r)
2119 goto out;
2120 r = -EFAULT;
2121 if (copy_to_user(argp, &tac, sizeof tac))
2122 goto out;
2123 r = 0;
2124 break;
 2125 }
Avi Kivityb93463a2007-10-25 16:52:32 +02002126 case KVM_SET_VAPIC_ADDR: {
2127 struct kvm_vapic_addr va;
2128
2129 r = -EINVAL;
2130 if (!irqchip_in_kernel(vcpu->kvm))
2131 goto out;
2132 r = -EFAULT;
2133 if (copy_from_user(&va, argp, sizeof va))
2134 goto out;
2135 r = 0;
2136 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2137 break;
2138 }
Huang Ying890ca9a2009-05-11 16:48:15 +08002139 case KVM_X86_SETUP_MCE: {
2140 u64 mcg_cap;
2141
2142 r = -EFAULT;
2143 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2144 goto out;
2145 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2146 break;
2147 }
2148 case KVM_X86_SET_MCE: {
2149 struct kvm_x86_mce mce;
2150
2151 r = -EFAULT;
2152 if (copy_from_user(&mce, argp, sizeof mce))
2153 goto out;
2154 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2155 break;
2156 }
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002157 case KVM_GET_VCPU_EVENTS: {
2158 struct kvm_vcpu_events events;
2159
2160 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2161
2162 r = -EFAULT;
2163 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2164 break;
2165 r = 0;
2166 break;
2167 }
2168 case KVM_SET_VCPU_EVENTS: {
2169 struct kvm_vcpu_events events;
2170
2171 r = -EFAULT;
2172 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2173 break;
2174
2175 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2176 break;
2177 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002178 default:
2179 r = -EINVAL;
2180 }
2181out:
Wei Yongjun7a6ce842009-03-31 16:47:44 +08002182 kfree(lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002183 return r;
2184}
2185
Carsten Otte1fe779f2007-10-29 16:08:35 +01002186static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2187{
2188 int ret;
2189
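	/*
	 * The real-mode TSS used for VMX guests occupies three consecutive
	 * pages below 4GB; (unsigned int)(-3 * PAGE_SIZE) is 4GB minus three
	 * pages, so any base above it leaves no room for the TSS.
	 */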
2190 if (addr > (unsigned int)(-3 * PAGE_SIZE))
2191 return -1;
2192 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2193 return ret;
2194}
2195
Sheng Yangb927a3c2009-07-21 10:42:48 +08002196static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2197 u64 ident_addr)
2198{
2199 kvm->arch.ept_identity_map_addr = ident_addr;
2200 return 0;
2201}
2202
Carsten Otte1fe779f2007-10-29 16:08:35 +01002203static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2204 u32 kvm_nr_mmu_pages)
2205{
2206 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2207 return -EINVAL;
2208
Izik Eidus72dc67a2008-02-10 18:04:15 +02002209 down_write(&kvm->slots_lock);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002210 spin_lock(&kvm->mmu_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002211
2212 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002213 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002214
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002215 spin_unlock(&kvm->mmu_lock);
Izik Eidus72dc67a2008-02-10 18:04:15 +02002216 up_write(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002217 return 0;
2218}
2219
2220static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2221{
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002222 return kvm->arch.n_alloc_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002223}
2224
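/*
 * Translate a guest frame number through the VM's alias table: a gfn inside
 * an alias slot is redirected to the matching offset in the target region,
 * anything else is returned unchanged.  For example, an alias with base_gfn
 * 0xa0, npages 0x10 and target_gfn 0x100 maps gfn 0xa5 to 0x105.
 */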
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002225gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
2226{
2227 int i;
2228 struct kvm_mem_alias *alias;
2229
Zhang Xiantaod69fb812007-12-14 09:54:20 +08002230 for (i = 0; i < kvm->arch.naliases; ++i) {
2231 alias = &kvm->arch.aliases[i];
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002232 if (gfn >= alias->base_gfn
2233 && gfn < alias->base_gfn + alias->npages)
2234 return alias->target_gfn + gfn - alias->base_gfn;
2235 }
2236 return gfn;
2237}
2238
Carsten Otte1fe779f2007-10-29 16:08:35 +01002239/*
2240 * Set a new alias region. Aliases map a portion of physical memory into
2241 * another portion. This is useful for memory windows, for example the PC
2242 * VGA region.
2243 */
2244static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
2245 struct kvm_memory_alias *alias)
2246{
2247 int r, n;
2248 struct kvm_mem_alias *p;
2249
2250 r = -EINVAL;
2251 /* General sanity checks */
2252 if (alias->memory_size & (PAGE_SIZE - 1))
2253 goto out;
2254 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
2255 goto out;
2256 if (alias->slot >= KVM_ALIAS_SLOTS)
2257 goto out;
2258 if (alias->guest_phys_addr + alias->memory_size
2259 < alias->guest_phys_addr)
2260 goto out;
2261 if (alias->target_phys_addr + alias->memory_size
2262 < alias->target_phys_addr)
2263 goto out;
2264
Izik Eidus72dc67a2008-02-10 18:04:15 +02002265 down_write(&kvm->slots_lock);
Andrea Arcangelia1708ce2008-07-25 16:26:39 +02002266 spin_lock(&kvm->mmu_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002267
Zhang Xiantaod69fb812007-12-14 09:54:20 +08002268 p = &kvm->arch.aliases[alias->slot];
Carsten Otte1fe779f2007-10-29 16:08:35 +01002269 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2270 p->npages = alias->memory_size >> PAGE_SHIFT;
2271 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
2272
2273 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
Zhang Xiantaod69fb812007-12-14 09:54:20 +08002274 if (kvm->arch.aliases[n - 1].npages)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002275 break;
Zhang Xiantaod69fb812007-12-14 09:54:20 +08002276 kvm->arch.naliases = n;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002277
Andrea Arcangelia1708ce2008-07-25 16:26:39 +02002278 spin_unlock(&kvm->mmu_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002279 kvm_mmu_zap_all(kvm);
2280
Izik Eidus72dc67a2008-02-10 18:04:15 +02002281 up_write(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002282
2283 return 0;
2284
2285out:
2286 return r;
2287}
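
/*
 * Illustrative userspace sketch (not part of this file): pointing the legacy
 * VGA window at part of an existing RAM slot with KVM_SET_MEMORY_ALIAS (the
 * target address is only an example):
 *
 *	struct kvm_memory_alias alias = {
 *		.slot = 0,
 *		.guest_phys_addr = 0xa0000,
 *		.memory_size = 0x20000,
 *		.target_phys_addr = 0x100000000ULL,
 *	};
 *	ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &alias);
 */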
2288
2289static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2290{
2291 int r;
2292
2293 r = 0;
2294 switch (chip->chip_id) {
2295 case KVM_IRQCHIP_PIC_MASTER:
2296 memcpy(&chip->chip.pic,
2297 &pic_irqchip(kvm)->pics[0],
2298 sizeof(struct kvm_pic_state));
2299 break;
2300 case KVM_IRQCHIP_PIC_SLAVE:
2301 memcpy(&chip->chip.pic,
2302 &pic_irqchip(kvm)->pics[1],
2303 sizeof(struct kvm_pic_state));
2304 break;
2305 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002306 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002307 break;
2308 default:
2309 r = -EINVAL;
2310 break;
2311 }
2312 return r;
2313}
2314
2315static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2316{
2317 int r;
2318
2319 r = 0;
2320 switch (chip->chip_id) {
2321 case KVM_IRQCHIP_PIC_MASTER:
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002322 spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002323 memcpy(&pic_irqchip(kvm)->pics[0],
2324 &chip->chip.pic,
2325 sizeof(struct kvm_pic_state));
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002326 spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002327 break;
2328 case KVM_IRQCHIP_PIC_SLAVE:
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002329 spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002330 memcpy(&pic_irqchip(kvm)->pics[1],
2331 &chip->chip.pic,
2332 sizeof(struct kvm_pic_state));
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002333 spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002334 break;
2335 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002336 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002337 break;
2338 default:
2339 r = -EINVAL;
2340 break;
2341 }
2342 kvm_pic_update_irq(pic_irqchip(kvm));
2343 return r;
2344}
2345
Sheng Yange0f63cb2008-03-04 00:50:59 +08002346static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2347{
2348 int r = 0;
2349
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002350 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002351 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002352 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002353 return r;
2354}
2355
2356static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2357{
2358 int r = 0;
2359
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002360 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002361 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
Beth Kone9f42752009-07-07 11:50:38 -04002362 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2363 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2364 return r;
2365}
2366
2367static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2368{
2369 int r = 0;
2370
2371 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2372 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2373 sizeof(ps->channels));
2374 ps->flags = kvm->arch.vpit->pit_state.flags;
2375 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2376 return r;
2377}
2378
2379static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2380{
2381 int r = 0, start = 0;
2382 u32 prev_legacy, cur_legacy;
2383 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2384 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2385 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2386 if (!prev_legacy && cur_legacy)
2387 start = 1;
2388 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2389 sizeof(kvm->arch.vpit->pit_state.channels));
2390 kvm->arch.vpit->pit_state.flags = ps->flags;
2391 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002392 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002393 return r;
2394}
2395
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002396static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2397 struct kvm_reinject_control *control)
2398{
2399 if (!kvm->arch.vpit)
2400 return -ENXIO;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002401 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002402 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002403 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002404 return 0;
2405}
2406
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002407/*
2408 * Get (and clear) the dirty memory log for a memory slot.
2409 */
2410int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2411 struct kvm_dirty_log *log)
2412{
2413 int r;
2414 int n;
2415 struct kvm_memory_slot *memslot;
2416 int is_dirty = 0;
2417
Izik Eidus72dc67a2008-02-10 18:04:15 +02002418 down_write(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002419
2420 r = kvm_get_dirty_log(kvm, log, &is_dirty);
2421 if (r)
2422 goto out;
2423
2424 /* If nothing is dirty, don't bother messing with page tables. */
2425 if (is_dirty) {
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002426 spin_lock(&kvm->mmu_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002427 kvm_mmu_slot_remove_write_access(kvm, log->slot);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002428 spin_unlock(&kvm->mmu_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002429 memslot = &kvm->memslots[log->slot];
2430 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2431 memset(memslot->dirty_bitmap, 0, n);
2432 }
2433 r = 0;
2434out:
Izik Eidus72dc67a2008-02-10 18:04:15 +02002435 up_write(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002436 return r;
2437}
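
/*
 * Illustrative userspace sketch (not part of this file): fetching the dirty
 * bitmap for memory slot 0.  The buffer must hold one bit per page of the
 * slot, rounded up to a multiple of BITS_PER_LONG as above; bitmap,
 * bitmap_size and scan_dirty_pages() are placeholders supplied by the
 * caller.
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *		scan_dirty_pages(bitmap, bitmap_size);
 */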
2438
Carsten Otte1fe779f2007-10-29 16:08:35 +01002439long kvm_arch_vm_ioctl(struct file *filp,
2440 unsigned int ioctl, unsigned long arg)
2441{
2442 struct kvm *kvm = filp->private_data;
2443 void __user *argp = (void __user *)arg;
Avi Kivity367e1312009-08-26 14:57:07 +03002444 int r = -ENOTTY;
Dave Hansenf0d66272008-08-11 10:01:45 -07002445 /*
2446 * This union makes it completely explicit to gcc-3.x
 2447 * that these variables' stack usage should be
2448 * combined, not added together.
2449 */
2450 union {
2451 struct kvm_pit_state ps;
Beth Kone9f42752009-07-07 11:50:38 -04002452 struct kvm_pit_state2 ps2;
Dave Hansenf0d66272008-08-11 10:01:45 -07002453 struct kvm_memory_alias alias;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002454 struct kvm_pit_config pit_config;
Dave Hansenf0d66272008-08-11 10:01:45 -07002455 } u;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002456
2457 switch (ioctl) {
2458 case KVM_SET_TSS_ADDR:
2459 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2460 if (r < 0)
2461 goto out;
2462 break;
Sheng Yangb927a3c2009-07-21 10:42:48 +08002463 case KVM_SET_IDENTITY_MAP_ADDR: {
2464 u64 ident_addr;
2465
2466 r = -EFAULT;
2467 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2468 goto out;
2469 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2470 if (r < 0)
2471 goto out;
2472 break;
2473 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01002474 case KVM_SET_MEMORY_REGION: {
2475 struct kvm_memory_region kvm_mem;
2476 struct kvm_userspace_memory_region kvm_userspace_mem;
2477
2478 r = -EFAULT;
2479 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2480 goto out;
2481 kvm_userspace_mem.slot = kvm_mem.slot;
2482 kvm_userspace_mem.flags = kvm_mem.flags;
2483 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2484 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2485 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2486 if (r)
2487 goto out;
2488 break;
2489 }
2490 case KVM_SET_NR_MMU_PAGES:
2491 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2492 if (r)
2493 goto out;
2494 break;
2495 case KVM_GET_NR_MMU_PAGES:
2496 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2497 break;
Dave Hansenf0d66272008-08-11 10:01:45 -07002498 case KVM_SET_MEMORY_ALIAS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002499 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002500 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
Carsten Otte1fe779f2007-10-29 16:08:35 +01002501 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002502 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002503 if (r)
2504 goto out;
2505 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002506 case KVM_CREATE_IRQCHIP: {
2507 struct kvm_pic *vpic;
2508
2509 mutex_lock(&kvm->lock);
2510 r = -EEXIST;
2511 if (kvm->arch.vpic)
2512 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002513 r = -ENOMEM;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002514 vpic = kvm_create_pic(kvm);
2515 if (vpic) {
Carsten Otte1fe779f2007-10-29 16:08:35 +01002516 r = kvm_ioapic_init(kvm);
2517 if (r) {
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002518 kfree(vpic);
2519 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002520 }
2521 } else
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002522 goto create_irqchip_unlock;
2523 smp_wmb();
2524 kvm->arch.vpic = vpic;
2525 smp_wmb();
Avi Kivity399ec802008-11-19 13:58:46 +02002526 r = kvm_setup_default_irq_routing(kvm);
2527 if (r) {
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002528 mutex_lock(&kvm->irq_lock);
Avi Kivity399ec802008-11-19 13:58:46 +02002529 kfree(kvm->arch.vpic);
2530 kfree(kvm->arch.vioapic);
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002531 kvm->arch.vpic = NULL;
2532 kvm->arch.vioapic = NULL;
2533 mutex_unlock(&kvm->irq_lock);
Avi Kivity399ec802008-11-19 13:58:46 +02002534 }
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002535 create_irqchip_unlock:
2536 mutex_unlock(&kvm->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002537 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002538 }
Sheng Yang78376992008-01-28 05:10:22 +08002539 case KVM_CREATE_PIT:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002540 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2541 goto create_pit;
2542 case KVM_CREATE_PIT2:
2543 r = -EFAULT;
2544 if (copy_from_user(&u.pit_config, argp,
2545 sizeof(struct kvm_pit_config)))
2546 goto out;
2547 create_pit:
Michael S. Tsirkin108b5662009-06-29 22:24:21 +03002548 down_write(&kvm->slots_lock);
Avi Kivity269e05e2009-01-05 15:21:42 +02002549 r = -EEXIST;
2550 if (kvm->arch.vpit)
2551 goto create_pit_unlock;
Sheng Yang78376992008-01-28 05:10:22 +08002552 r = -ENOMEM;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002553 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
Sheng Yang78376992008-01-28 05:10:22 +08002554 if (kvm->arch.vpit)
2555 r = 0;
Avi Kivity269e05e2009-01-05 15:21:42 +02002556 create_pit_unlock:
Michael S. Tsirkin108b5662009-06-29 22:24:21 +03002557 up_write(&kvm->slots_lock);
Sheng Yang78376992008-01-28 05:10:22 +08002558 break;
Gleb Natapov49256632009-02-04 17:28:14 +02002559 case KVM_IRQ_LINE_STATUS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002560 case KVM_IRQ_LINE: {
2561 struct kvm_irq_level irq_event;
2562
2563 r = -EFAULT;
2564 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2565 goto out;
2566 if (irqchip_in_kernel(kvm)) {
Gleb Natapov49256632009-02-04 17:28:14 +02002567 __s32 status;
Gleb Natapov49256632009-02-04 17:28:14 +02002568 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2569 irq_event.irq, irq_event.level);
Gleb Natapov49256632009-02-04 17:28:14 +02002570 if (ioctl == KVM_IRQ_LINE_STATUS) {
2571 irq_event.status = status;
2572 if (copy_to_user(argp, &irq_event,
2573 sizeof irq_event))
2574 goto out;
2575 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01002576 r = 0;
2577 }
2578 break;
2579 }
2580 case KVM_GET_IRQCHIP: {
2581 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07002582 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002583
Dave Hansenf0d66272008-08-11 10:01:45 -07002584 r = -ENOMEM;
2585 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002586 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002587 r = -EFAULT;
2588 if (copy_from_user(chip, argp, sizeof *chip))
2589 goto get_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002590 r = -ENXIO;
2591 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07002592 goto get_irqchip_out;
2593 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
2594 if (r)
2595 goto get_irqchip_out;
2596 r = -EFAULT;
2597 if (copy_to_user(argp, chip, sizeof *chip))
2598 goto get_irqchip_out;
2599 r = 0;
2600 get_irqchip_out:
2601 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002602 if (r)
2603 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002604 break;
2605 }
2606 case KVM_SET_IRQCHIP: {
2607 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07002608 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002609
Dave Hansenf0d66272008-08-11 10:01:45 -07002610 r = -ENOMEM;
2611 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002612 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002613 r = -EFAULT;
2614 if (copy_from_user(chip, argp, sizeof *chip))
2615 goto set_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002616 r = -ENXIO;
2617 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07002618 goto set_irqchip_out;
2619 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
2620 if (r)
2621 goto set_irqchip_out;
2622 r = 0;
2623 set_irqchip_out:
2624 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002625 if (r)
2626 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002627 break;
2628 }
Sheng Yange0f63cb2008-03-04 00:50:59 +08002629 case KVM_GET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08002630 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002631 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002632 goto out;
2633 r = -ENXIO;
2634 if (!kvm->arch.vpit)
2635 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002636 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002637 if (r)
2638 goto out;
2639 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002640 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002641 goto out;
2642 r = 0;
2643 break;
2644 }
2645 case KVM_SET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08002646 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002647 if (copy_from_user(&u.ps, argp, sizeof u.ps))
Sheng Yange0f63cb2008-03-04 00:50:59 +08002648 goto out;
2649 r = -ENXIO;
2650 if (!kvm->arch.vpit)
2651 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002652 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002653 if (r)
2654 goto out;
2655 r = 0;
2656 break;
2657 }
Beth Kone9f42752009-07-07 11:50:38 -04002658 case KVM_GET_PIT2: {
2659 r = -ENXIO;
2660 if (!kvm->arch.vpit)
2661 goto out;
2662 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
2663 if (r)
2664 goto out;
2665 r = -EFAULT;
2666 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
2667 goto out;
2668 r = 0;
2669 break;
2670 }
2671 case KVM_SET_PIT2: {
2672 r = -EFAULT;
2673 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
2674 goto out;
2675 r = -ENXIO;
2676 if (!kvm->arch.vpit)
2677 goto out;
2678 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
2679 if (r)
2680 goto out;
2681 r = 0;
2682 break;
2683 }
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002684 case KVM_REINJECT_CONTROL: {
2685 struct kvm_reinject_control control;
2686 r = -EFAULT;
2687 if (copy_from_user(&control, argp, sizeof(control)))
2688 goto out;
2689 r = kvm_vm_ioctl_reinject(kvm, &control);
2690 if (r)
2691 goto out;
2692 r = 0;
2693 break;
2694 }
Ed Swierkffde22a2009-10-15 15:21:43 -07002695 case KVM_XEN_HVM_CONFIG: {
2696 r = -EFAULT;
2697 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
2698 sizeof(struct kvm_xen_hvm_config)))
2699 goto out;
2700 r = -EINVAL;
2701 if (kvm->arch.xen_hvm_config.flags)
2702 goto out;
2703 r = 0;
2704 break;
2705 }
Glauber Costaafbcf7a2009-10-16 15:28:36 -04002706 case KVM_SET_CLOCK: {
2707 struct timespec now;
2708 struct kvm_clock_data user_ns;
2709 u64 now_ns;
2710 s64 delta;
2711
2712 r = -EFAULT;
2713 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
2714 goto out;
2715
2716 r = -EINVAL;
2717 if (user_ns.flags)
2718 goto out;
2719
2720 r = 0;
2721 ktime_get_ts(&now);
2722 now_ns = timespec_to_ns(&now);
2723 delta = user_ns.clock - now_ns;
2724 kvm->arch.kvmclock_offset = delta;
2725 break;
2726 }
2727 case KVM_GET_CLOCK: {
2728 struct timespec now;
2729 struct kvm_clock_data user_ns;
2730 u64 now_ns;
2731
2732 ktime_get_ts(&now);
2733 now_ns = timespec_to_ns(&now);
2734 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
2735 user_ns.flags = 0;
2736
2737 r = -EFAULT;
2738 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
2739 goto out;
2740 r = 0;
2741 break;
2742 }
2743
Carsten Otte1fe779f2007-10-29 16:08:35 +01002744 default:
2745 ;
2746 }
2747out:
2748 return r;
2749}
2750
Zhang Xiantaoa16b0432007-11-16 14:38:21 +08002751static void kvm_init_msr_list(void)
Carsten Otte043405e2007-10-10 17:16:19 +02002752{
2753 u32 dummy[2];
2754 unsigned i, j;
2755
Glauber Costae3267cb2009-10-06 13:24:50 -04002756	/* skip the leading KVM-specific (paravirtual) MSRs; they can't be probed with rdmsr */
2757 for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
Carsten Otte043405e2007-10-10 17:16:19 +02002758 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2759 continue;
2760 if (j < i)
2761 msrs_to_save[j] = msrs_to_save[i];
2762 j++;
2763 }
2764 num_msrs_to_save = j;
2765}
2766
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03002767static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
2768 const void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01002769{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03002770 if (vcpu->arch.apic &&
2771 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
2772 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01002773
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03002774 return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002775}
2776
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03002777static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01002778{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03002779 if (vcpu->arch.apic &&
2780 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
2781 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01002782
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03002783 return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002784}
2785
Hannes Edercded19f2009-02-21 02:19:13 +01002786static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2787 struct kvm_vcpu *vcpu)
Carsten Ottebbd9b642007-10-30 18:44:21 +01002788{
2789 void *data = val;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002790 int r = X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01002791
2792 while (bytes) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002793 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002794 unsigned offset = addr & (PAGE_SIZE-1);
Izik Eidus77c20022008-12-29 01:42:19 +02002795 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002796 int ret;
2797
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002798 if (gpa == UNMAPPED_GVA) {
2799 r = X86EMUL_PROPAGATE_FAULT;
2800 goto out;
2801 }
Izik Eidus77c20022008-12-29 01:42:19 +02002802 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002803 if (ret < 0) {
2804 r = X86EMUL_UNHANDLEABLE;
2805 goto out;
2806 }
Carsten Ottebbd9b642007-10-30 18:44:21 +01002807
Izik Eidus77c20022008-12-29 01:42:19 +02002808 bytes -= toread;
2809 data += toread;
2810 addr += toread;
Carsten Ottebbd9b642007-10-30 18:44:21 +01002811 }
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002812out:
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002813 return r;
Carsten Ottebbd9b642007-10-30 18:44:21 +01002814}
Izik Eidus77c20022008-12-29 01:42:19 +02002815
Hannes Edercded19f2009-02-21 02:19:13 +01002816static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2817 struct kvm_vcpu *vcpu)
Izik Eidus77c20022008-12-29 01:42:19 +02002818{
2819 void *data = val;
2820 int r = X86EMUL_CONTINUE;
2821
2822 while (bytes) {
2823 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2824 unsigned offset = addr & (PAGE_SIZE-1);
2825 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2826 int ret;
2827
2828 if (gpa == UNMAPPED_GVA) {
2829 r = X86EMUL_PROPAGATE_FAULT;
2830 goto out;
2831 }
2832 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
2833 if (ret < 0) {
2834 r = X86EMUL_UNHANDLEABLE;
2835 goto out;
2836 }
2837
2838 bytes -= towrite;
2839 data += towrite;
2840 addr += towrite;
2841 }
2842out:
2843 return r;
2844}
2845
Carsten Ottebbd9b642007-10-30 18:44:21 +01002846
Carsten Ottebbd9b642007-10-30 18:44:21 +01002847static int emulator_read_emulated(unsigned long addr,
2848 void *val,
2849 unsigned int bytes,
2850 struct kvm_vcpu *vcpu)
2851{
Carsten Ottebbd9b642007-10-30 18:44:21 +01002852 gpa_t gpa;
2853
2854 if (vcpu->mmio_read_completed) {
2855 memcpy(val, vcpu->mmio_data, bytes);
Avi Kivityaec51dc2009-07-01 16:01:02 +03002856 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
2857 vcpu->mmio_phys_addr, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002858 vcpu->mmio_read_completed = 0;
2859 return X86EMUL_CONTINUE;
2860 }
2861
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002862 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002863
2864 /* For APIC access vmexit */
2865 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2866 goto mmio;
2867
Izik Eidus77c20022008-12-29 01:42:19 +02002868 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2869 == X86EMUL_CONTINUE)
Carsten Ottebbd9b642007-10-30 18:44:21 +01002870 return X86EMUL_CONTINUE;
2871 if (gpa == UNMAPPED_GVA)
2872 return X86EMUL_PROPAGATE_FAULT;
2873
2874mmio:
2875 /*
2876 * Is this MMIO handled locally?
2877 */
Avi Kivityaec51dc2009-07-01 16:01:02 +03002878 if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
2879 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002880 return X86EMUL_CONTINUE;
2881 }
Avi Kivityaec51dc2009-07-01 16:01:02 +03002882
2883 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002884
2885 vcpu->mmio_needed = 1;
2886 vcpu->mmio_phys_addr = gpa;
2887 vcpu->mmio_size = bytes;
2888 vcpu->mmio_is_write = 0;
2889
2890 return X86EMUL_UNHANDLEABLE;
2891}
2892
Marcelo Tosatti3200f402008-03-29 20:17:59 -03002893int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
Avi Kivity9f811282008-03-02 14:06:05 +02002894 const void *val, int bytes)
2895{
2896 int ret;
2897
2898 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
2899 if (ret < 0)
2900 return 0;
Marcelo Tosattiad218f82008-12-01 22:32:05 -02002901 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
Avi Kivity9f811282008-03-02 14:06:05 +02002902 return 1;
2903}
2904
Carsten Ottebbd9b642007-10-30 18:44:21 +01002905static int emulator_write_emulated_onepage(unsigned long addr,
2906 const void *val,
2907 unsigned int bytes,
2908 struct kvm_vcpu *vcpu)
2909{
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002910 gpa_t gpa;
2911
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002912 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002913
2914 if (gpa == UNMAPPED_GVA) {
Avi Kivityc3c91fe2007-11-25 14:04:58 +02002915 kvm_inject_page_fault(vcpu, addr, 2);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002916 return X86EMUL_PROPAGATE_FAULT;
2917 }
2918
2919 /* For APIC access vmexit */
2920 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2921 goto mmio;
2922
2923 if (emulator_write_phys(vcpu, gpa, val, bytes))
2924 return X86EMUL_CONTINUE;
2925
2926mmio:
Avi Kivityaec51dc2009-07-01 16:01:02 +03002927 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01002928 /*
2929 * Is this MMIO handled locally?
2930 */
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03002931 if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
Carsten Ottebbd9b642007-10-30 18:44:21 +01002932 return X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01002933
2934 vcpu->mmio_needed = 1;
2935 vcpu->mmio_phys_addr = gpa;
2936 vcpu->mmio_size = bytes;
2937 vcpu->mmio_is_write = 1;
2938 memcpy(vcpu->mmio_data, val, bytes);
2939
2940 return X86EMUL_CONTINUE;
2941}
2942
2943int emulator_write_emulated(unsigned long addr,
2944 const void *val,
2945 unsigned int bytes,
2946 struct kvm_vcpu *vcpu)
2947{
2948 /* Crossing a page boundary? */
2949 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2950 int rc, now;
2951
2952 now = -addr & ~PAGE_MASK;
2953 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2954 if (rc != X86EMUL_CONTINUE)
2955 return rc;
2956 addr += now;
2957 val += now;
2958 bytes -= now;
2959 }
2960 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2961}
2962EXPORT_SYMBOL_GPL(emulator_write_emulated);
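/*
 * Illustrative example of the boundary handling above (hypothetical
 * values): a 16-byte write at guest virtual address 0x1ff8 crosses a page,
 * and
 *
 *	now = -0x1ff8 & ~PAGE_MASK = 8
 *
 * so the first 8 bytes go through one emulator_write_emulated_onepage()
 * call and the remaining 8 bytes, now starting at 0x2000, through a
 * second one.
 */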
2963
2964static int emulator_cmpxchg_emulated(unsigned long addr,
2965 const void *old,
2966 const void *new,
2967 unsigned int bytes,
2968 struct kvm_vcpu *vcpu)
2969{
Marcin Slusarz9f51e242009-08-09 21:54:00 +02002970 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05002971#ifndef CONFIG_X86_64
2972 /* guests cmpxchg8b have to be emulated atomically */
2973 if (bytes == 8) {
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002974 gpa_t gpa;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05002975 struct page *page;
Andrew Mortonc0b49b02008-02-04 22:27:18 -08002976 char *kaddr;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05002977 u64 val;
2978
Marcelo Tosatti10589a42007-12-20 19:18:22 -05002979 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2980
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05002981 if (gpa == UNMAPPED_GVA ||
2982 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2983 goto emul_write;
2984
2985 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2986 goto emul_write;
2987
2988 val = *(u64 *)new;
Izik Eidus72dc67a2008-02-10 18:04:15 +02002989
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05002990 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02002991
Andrew Mortonc0b49b02008-02-04 22:27:18 -08002992 kaddr = kmap_atomic(page, KM_USER0);
2993 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2994 kunmap_atomic(kaddr, KM_USER0);
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05002995 kvm_release_page_dirty(page);
2996 }
Marcelo Tosatti3200f402008-03-29 20:17:59 -03002997emul_write:
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05002998#endif
2999
Carsten Ottebbd9b642007-10-30 18:44:21 +01003000 return emulator_write_emulated(addr, new, bytes, vcpu);
3001}
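/*
 * Note: except for the 8-byte case on 32-bit hosts handled above, the
 * exchange is emulated as a plain write of "new" and the "old" value is
 * never compared, which is why printk_once() warns the first time this
 * path runs. A guest relying on cmpxchg atomicity against other vcpus may
 * therefore observe lost updates here.
 */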
3002
3003static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3004{
3005 return kvm_x86_ops->get_segment_base(vcpu, seg);
3006}
3007
3008int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3009{
Marcelo Tosattia7052892008-09-23 13:18:35 -03003010 kvm_mmu_invlpg(vcpu, address);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003011 return X86EMUL_CONTINUE;
3012}
3013
3014int emulate_clts(struct kvm_vcpu *vcpu)
3015{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003016 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003017 return X86EMUL_CONTINUE;
3018}
3019
3020int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
3021{
3022 struct kvm_vcpu *vcpu = ctxt->vcpu;
3023
3024 switch (dr) {
3025 case 0 ... 3:
3026 *dest = kvm_x86_ops->get_dr(vcpu, dr);
3027 return X86EMUL_CONTINUE;
3028 default:
Harvey Harrisonb8688d52008-03-03 12:59:56 -08003029 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003030 return X86EMUL_UNHANDLEABLE;
3031 }
3032}
3033
3034int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
3035{
3036 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
3037 int exception;
3038
3039 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
3040 if (exception) {
3041 /* FIXME: better handling */
3042 return X86EMUL_UNHANDLEABLE;
3043 }
3044 return X86EMUL_CONTINUE;
3045}
3046
3047void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
3048{
Carsten Ottebbd9b642007-10-30 18:44:21 +01003049 u8 opcodes[4];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003050 unsigned long rip = kvm_rip_read(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003051 unsigned long rip_linear;
3052
Avi Kivityf76c7102008-06-13 22:45:42 +03003053 if (!printk_ratelimit())
Carsten Ottebbd9b642007-10-30 18:44:21 +01003054 return;
3055
Glauber Costa25be4602008-06-10 10:46:53 -03003056 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
3057
Izik Eidus77c20022008-12-29 01:42:19 +02003058 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003059
3060 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
3061 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003062}
3063EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
3064
Harvey Harrison14af3f32008-02-19 10:25:50 -08003065static struct x86_emulate_ops emulate_ops = {
Izik Eidus77c20022008-12-29 01:42:19 +02003066 .read_std = kvm_read_guest_virt,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003067 .read_emulated = emulator_read_emulated,
3068 .write_emulated = emulator_write_emulated,
3069 .cmpxchg_emulated = emulator_cmpxchg_emulated,
3070};
3071
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003072static void cache_all_regs(struct kvm_vcpu *vcpu)
3073{
3074 kvm_register_read(vcpu, VCPU_REGS_RAX);
3075 kvm_register_read(vcpu, VCPU_REGS_RSP);
3076 kvm_register_read(vcpu, VCPU_REGS_RIP);
3077 vcpu->arch.regs_dirty = ~0;
3078}
3079
Carsten Ottebbd9b642007-10-30 18:44:21 +01003080int emulate_instruction(struct kvm_vcpu *vcpu,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003081 unsigned long cr2,
3082 u16 error_code,
Sheng Yang571008d2008-01-02 14:49:22 +08003083 int emulation_type)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003084{
Glauber Costa310b5d32009-05-12 16:21:06 -04003085 int r, shadow_mask;
Sheng Yang571008d2008-01-02 14:49:22 +08003086 struct decode_cache *c;
Avi Kivity851ba692009-08-24 11:10:17 +03003087 struct kvm_run *run = vcpu->run;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003088
Avi Kivity26eef702008-07-03 14:59:22 +03003089 kvm_clear_exception_queue(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003090 vcpu->arch.mmio_fault_cr2 = cr2;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003091 /*
Avi Kivity56e82312009-08-12 15:04:37 +03003092 * TODO: fix emulate.c to use guest_read/write_register
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003093	 * instead of direct ->regs accesses; this can save hundreds of
 3094	 * cycles on Intel for instructions that don't read/change RSP,
 3095	 * for example.
3096 */
3097 cache_all_regs(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003098
3099 vcpu->mmio_is_write = 0;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003100 vcpu->arch.pio.string = 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003101
Sheng Yang571008d2008-01-02 14:49:22 +08003102 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
Carsten Ottebbd9b642007-10-30 18:44:21 +01003103 int cs_db, cs_l;
3104 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3105
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003106 vcpu->arch.emulate_ctxt.vcpu = vcpu;
Jan Kiszka91586a32009-10-05 13:07:21 +02003107 vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003108 vcpu->arch.emulate_ctxt.mode =
3109 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003110 ? X86EMUL_MODE_REAL : cs_l
3111 ? X86EMUL_MODE_PROT64 : cs_db
3112 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3113
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003114 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Sheng Yang571008d2008-01-02 14:49:22 +08003115
Andre Przywara0cb57622009-06-17 15:50:31 +02003116		/* Only allow emulation of specific instructions on #UD
 3117		 * (namely VMMCALL, sysenter, sysexit, syscall) */
Sheng Yang571008d2008-01-02 14:49:22 +08003118 c = &vcpu->arch.emulate_ctxt.decode;
Andre Przywara0cb57622009-06-17 15:50:31 +02003119 if (emulation_type & EMULTYPE_TRAP_UD) {
3120 if (!c->twobyte)
3121 return EMULATE_FAIL;
3122 switch (c->b) {
3123 case 0x01: /* VMMCALL */
3124 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3125 return EMULATE_FAIL;
3126 break;
3127 case 0x34: /* sysenter */
3128 case 0x35: /* sysexit */
3129 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3130 return EMULATE_FAIL;
3131 break;
3132 case 0x05: /* syscall */
3133 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3134 return EMULATE_FAIL;
3135 break;
3136 default:
3137 return EMULATE_FAIL;
3138 }
3139
3140 if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
3141 return EMULATE_FAIL;
3142 }
Sheng Yang571008d2008-01-02 14:49:22 +08003143
Avi Kivityf2b57562007-11-18 15:17:51 +02003144 ++vcpu->stat.insn_emulation;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003145 if (r) {
Avi Kivityf2b57562007-11-18 15:17:51 +02003146 ++vcpu->stat.insn_emulation_fail;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003147 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3148 return EMULATE_DONE;
3149 return EMULATE_FAIL;
3150 }
3151 }
3152
Gleb Natapovba8afb62009-04-12 13:36:57 +03003153 if (emulation_type & EMULTYPE_SKIP) {
3154 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
3155 return EMULATE_DONE;
3156 }
3157
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003158 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Glauber Costa310b5d32009-05-12 16:21:06 -04003159 shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
3160
3161 if (r == 0)
3162 kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003163
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003164 if (vcpu->arch.pio.string)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003165 return EMULATE_DO_MMIO;
3166
3167 if ((r || vcpu->mmio_is_write) && run) {
3168 run->exit_reason = KVM_EXIT_MMIO;
3169 run->mmio.phys_addr = vcpu->mmio_phys_addr;
3170 memcpy(run->mmio.data, vcpu->mmio_data, 8);
3171 run->mmio.len = vcpu->mmio_size;
3172 run->mmio.is_write = vcpu->mmio_is_write;
3173 }
3174
3175 if (r) {
3176 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3177 return EMULATE_DONE;
3178 if (!vcpu->mmio_needed) {
3179 kvm_report_emulation_failure(vcpu, "mmio");
3180 return EMULATE_FAIL;
3181 }
3182 return EMULATE_DO_MMIO;
3183 }
3184
Jan Kiszka91586a32009-10-05 13:07:21 +02003185 kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003186
3187 if (vcpu->mmio_is_write) {
3188 vcpu->mmio_needed = 0;
3189 return EMULATE_DO_MMIO;
3190 }
3191
3192 return EMULATE_DONE;
3193}
3194EXPORT_SYMBOL_GPL(emulate_instruction);
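/*
 * Sketch of how a caller typically dispatches on the result (illustrative
 * only; the real handling lives in the vendor exit handlers):
 *
 *	switch (emulate_instruction(vcpu, cr2, error_code, 0)) {
 *	case EMULATE_DONE:
 *		return 1;	resume the guest
 *	case EMULATE_DO_MMIO:
 *		return 0;	exit to userspace (KVM_EXIT_MMIO / KVM_EXIT_IO)
 *	case EMULATE_FAIL:
 *	default:
 *		report the failure and give up on emulation
 *	}
 */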
3195
Carsten Ottede7d7892007-10-30 18:44:25 +01003196static int pio_copy_data(struct kvm_vcpu *vcpu)
3197{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003198 void *p = vcpu->arch.pio_data;
Izik Eidus0f346072008-12-29 01:42:20 +02003199 gva_t q = vcpu->arch.pio.guest_gva;
Carsten Ottede7d7892007-10-30 18:44:25 +01003200 unsigned bytes;
Izik Eidus0f346072008-12-29 01:42:20 +02003201 int ret;
Carsten Ottede7d7892007-10-30 18:44:25 +01003202
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003203 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
3204 if (vcpu->arch.pio.in)
Izik Eidus0f346072008-12-29 01:42:20 +02003205 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
Carsten Ottede7d7892007-10-30 18:44:25 +01003206 else
Izik Eidus0f346072008-12-29 01:42:20 +02003207 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
3208 return ret;
Carsten Ottede7d7892007-10-30 18:44:25 +01003209}
3210
3211int complete_pio(struct kvm_vcpu *vcpu)
3212{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003213 struct kvm_pio_request *io = &vcpu->arch.pio;
Carsten Ottede7d7892007-10-30 18:44:25 +01003214 long delta;
3215 int r;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003216 unsigned long val;
Carsten Ottede7d7892007-10-30 18:44:25 +01003217
3218 if (!io->string) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003219 if (io->in) {
3220 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3221 memcpy(&val, vcpu->arch.pio_data, io->size);
3222 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
3223 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003224 } else {
3225 if (io->in) {
3226 r = pio_copy_data(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003227 if (r)
Carsten Ottede7d7892007-10-30 18:44:25 +01003228 return r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003229 }
3230
3231 delta = 1;
3232 if (io->rep) {
3233 delta *= io->cur_count;
3234 /*
3235 * The size of the register should really depend on
3236 * current address size.
3237 */
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003238 val = kvm_register_read(vcpu, VCPU_REGS_RCX);
3239 val -= delta;
3240 kvm_register_write(vcpu, VCPU_REGS_RCX, val);
Carsten Ottede7d7892007-10-30 18:44:25 +01003241 }
3242 if (io->down)
3243 delta = -delta;
3244 delta *= io->size;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003245 if (io->in) {
3246 val = kvm_register_read(vcpu, VCPU_REGS_RDI);
3247 val += delta;
3248 kvm_register_write(vcpu, VCPU_REGS_RDI, val);
3249 } else {
3250 val = kvm_register_read(vcpu, VCPU_REGS_RSI);
3251 val += delta;
3252 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
3253 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003254 }
3255
Carsten Ottede7d7892007-10-30 18:44:25 +01003256 io->count -= io->cur_count;
3257 io->cur_count = 0;
3258
3259 return 0;
3260}
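/*
 * Worked example of the register fixup above (hypothetical values): after
 * the kernel completes a "rep insb" with size = 1, cur_count = 5 and the
 * direction flag clear, the code rewinds/advances
 *
 *	RCX -= 5	(delta = cur_count, because of the rep prefix)
 *	RDI += 5 * 1	(delta *= io->size; insb advances RDI)
 *
 * A "rep outsb" adjusts RSI instead, and io->down (direction flag set)
 * negates delta so the pointers move backwards.
 */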
3261
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003262static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
Carsten Ottede7d7892007-10-30 18:44:25 +01003263{
 3264	/* TODO: String I/O for in-kernel device */
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003265 int r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003266
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003267 if (vcpu->arch.pio.in)
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003268 r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
3269 vcpu->arch.pio.size, pd);
Carsten Ottede7d7892007-10-30 18:44:25 +01003270 else
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003271 r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
3272 vcpu->arch.pio.size, pd);
3273 return r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003274}
3275
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003276static int pio_string_write(struct kvm_vcpu *vcpu)
Carsten Ottede7d7892007-10-30 18:44:25 +01003277{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003278 struct kvm_pio_request *io = &vcpu->arch.pio;
3279 void *pd = vcpu->arch.pio_data;
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003280 int i, r = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003281
Carsten Ottede7d7892007-10-30 18:44:25 +01003282 for (i = 0; i < io->cur_count; i++) {
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003283 if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
3284 io->port, io->size, pd)) {
3285 r = -EOPNOTSUPP;
3286 break;
3287 }
Carsten Ottede7d7892007-10-30 18:44:25 +01003288 pd += io->size;
3289 }
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003290 return r;
Carsten Ottede7d7892007-10-30 18:44:25 +01003291}
3292
Avi Kivity851ba692009-08-24 11:10:17 +03003293int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
Carsten Ottede7d7892007-10-30 18:44:25 +01003294{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003295 unsigned long val;
Carsten Ottede7d7892007-10-30 18:44:25 +01003296
3297 vcpu->run->exit_reason = KVM_EXIT_IO;
3298 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003299 vcpu->run->io.size = vcpu->arch.pio.size = size;
Carsten Ottede7d7892007-10-30 18:44:25 +01003300 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003301 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
3302 vcpu->run->io.port = vcpu->arch.pio.port = port;
3303 vcpu->arch.pio.in = in;
3304 vcpu->arch.pio.string = 0;
3305 vcpu->arch.pio.down = 0;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003306 vcpu->arch.pio.rep = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003307
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003308 trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
3309 size, 1);
Feng (Eric) Liu2714d1d2008-04-10 15:31:10 -04003310
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003311 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
3312 memcpy(vcpu->arch.pio_data, &val, 4);
Carsten Ottede7d7892007-10-30 18:44:25 +01003313
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003314 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003315 complete_pio(vcpu);
3316 return 1;
3317 }
3318 return 0;
3319}
3320EXPORT_SYMBOL_GPL(kvm_emulate_pio);
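/*
 * Illustrative call (hypothetical values): the vendor exit handler for a
 * single-byte "out %al, $0x71" would end up doing
 *
 *	kvm_emulate_pio(vcpu, 0, 1, 0x71);
 *
 * A return of 1 means an in-kernel device on pio_bus handled the byte and
 * the guest can be resumed; 0 means the vcpu must exit to userspace with
 * KVM_EXIT_IO.
 */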
3321
Avi Kivity851ba692009-08-24 11:10:17 +03003322int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
Carsten Ottede7d7892007-10-30 18:44:25 +01003323 int size, unsigned long count, int down,
3324 gva_t address, int rep, unsigned port)
3325{
3326 unsigned now, in_page;
Izik Eidus0f346072008-12-29 01:42:20 +02003327 int ret = 0;
Carsten Ottede7d7892007-10-30 18:44:25 +01003328
3329 vcpu->run->exit_reason = KVM_EXIT_IO;
3330 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003331 vcpu->run->io.size = vcpu->arch.pio.size = size;
Carsten Ottede7d7892007-10-30 18:44:25 +01003332 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003333 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
3334 vcpu->run->io.port = vcpu->arch.pio.port = port;
3335 vcpu->arch.pio.in = in;
3336 vcpu->arch.pio.string = 1;
3337 vcpu->arch.pio.down = down;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003338 vcpu->arch.pio.rep = rep;
Carsten Ottede7d7892007-10-30 18:44:25 +01003339
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003340 trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
3341 size, count);
Feng (Eric) Liu2714d1d2008-04-10 15:31:10 -04003342
Carsten Ottede7d7892007-10-30 18:44:25 +01003343 if (!count) {
3344 kvm_x86_ops->skip_emulated_instruction(vcpu);
3345 return 1;
3346 }
3347
3348 if (!down)
3349 in_page = PAGE_SIZE - offset_in_page(address);
3350 else
3351 in_page = offset_in_page(address) + size;
3352 now = min(count, (unsigned long)in_page / size);
Izik Eidus0f346072008-12-29 01:42:20 +02003353 if (!now)
Carsten Ottede7d7892007-10-30 18:44:25 +01003354 now = 1;
Carsten Ottede7d7892007-10-30 18:44:25 +01003355 if (down) {
3356 /*
3357 * String I/O in reverse. Yuck. Kill the guest, fix later.
3358 */
3359 pr_unimpl(vcpu, "guest string pio down\n");
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003360 kvm_inject_gp(vcpu, 0);
Carsten Ottede7d7892007-10-30 18:44:25 +01003361 return 1;
3362 }
3363 vcpu->run->io.count = now;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003364 vcpu->arch.pio.cur_count = now;
Carsten Ottede7d7892007-10-30 18:44:25 +01003365
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003366 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
Carsten Ottede7d7892007-10-30 18:44:25 +01003367 kvm_x86_ops->skip_emulated_instruction(vcpu);
3368
Izik Eidus0f346072008-12-29 01:42:20 +02003369 vcpu->arch.pio.guest_gva = address;
Carsten Ottede7d7892007-10-30 18:44:25 +01003370
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003371 if (!vcpu->arch.pio.in) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003372 /* string PIO write */
3373 ret = pio_copy_data(vcpu);
Izik Eidus0f346072008-12-29 01:42:20 +02003374 if (ret == X86EMUL_PROPAGATE_FAULT) {
3375 kvm_inject_gp(vcpu, 0);
3376 return 1;
3377 }
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003378 if (ret == 0 && !pio_string_write(vcpu)) {
Carsten Ottede7d7892007-10-30 18:44:25 +01003379 complete_pio(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003380 if (vcpu->arch.pio.count == 0)
Carsten Ottede7d7892007-10-30 18:44:25 +01003381 ret = 1;
3382 }
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003383 }
3384 /* no string PIO read support yet */
Carsten Ottede7d7892007-10-30 18:44:25 +01003385
3386 return ret;
3387}
3388EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
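/*
 * Example of the page clamp above (hypothetical values): a "rep outsw"
 * (size = 2, direction flag clear) with count = 100 and a source address
 * whose offset_in_page() is 0xff8 leaves in_page = 8 bytes, so
 *
 *	now = min(100, 8 / 2) = 4
 *
 * and only 4 elements are transferred in this round trip; since the
 * instruction is not skipped in that case, the guest re-executes it with
 * the registers adjusted by complete_pio(), until the count is exhausted.
 */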
3389
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003390static void bounce_off(void *info)
3391{
3392 /* nothing */
3393}
3394
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003395static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
3396 void *data)
3397{
3398 struct cpufreq_freqs *freq = data;
3399 struct kvm *kvm;
3400 struct kvm_vcpu *vcpu;
3401 int i, send_ipi = 0;
3402
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003403 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
3404 return 0;
3405 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
3406 return 0;
Zachary Amsden0cca7902009-09-29 11:38:35 -10003407 per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003408
3409 spin_lock(&kvm_lock);
3410 list_for_each_entry(kvm, &vm_list, vm_list) {
Gleb Natapov988a2ca2009-06-09 15:56:29 +03003411 kvm_for_each_vcpu(i, vcpu, kvm) {
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003412 if (vcpu->cpu != freq->cpu)
3413 continue;
3414 if (!kvm_request_guest_time_update(vcpu))
3415 continue;
3416 if (vcpu->cpu != smp_processor_id())
3417 send_ipi++;
3418 }
3419 }
3420 spin_unlock(&kvm_lock);
3421
3422 if (freq->old < freq->new && send_ipi) {
3423 /*
 3424		 * We upscale the frequency. We must make sure the guest
 3425		 * doesn't see old kvmclock values while running with
 3426		 * the new frequency, otherwise we risk the guest seeing
 3427		 * time go backwards.
3428 *
3429 * In case we update the frequency for another cpu
3430 * (which might be in guest context) send an interrupt
3431 * to kick the cpu out of guest context. Next time
3432 * guest context is entered kvmclock will be updated,
3433 * so the guest will not see stale values.
3434 */
3435 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
3436 }
3437 return 0;
3438}
3439
3440static struct notifier_block kvmclock_cpufreq_notifier_block = {
3441 .notifier_call = kvmclock_cpufreq_notifier
3442};
3443
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003444static void kvm_timer_init(void)
3445{
3446 int cpu;
3447
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003448 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003449 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
3450 CPUFREQ_TRANSITION_NOTIFIER);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10003451 for_each_online_cpu(cpu) {
3452 unsigned long khz = cpufreq_get(cpu);
3453 if (!khz)
3454 khz = tsc_khz;
3455 per_cpu(cpu_tsc_khz, cpu) = khz;
3456 }
Zachary Amsden0cca7902009-09-29 11:38:35 -10003457 } else {
3458 for_each_possible_cpu(cpu)
3459 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003460 }
3461}
3462
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003463int kvm_arch_init(void *opaque)
Carsten Otte043405e2007-10-10 17:16:19 +02003464{
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003465 int r;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003466 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
3467
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003468 if (kvm_x86_ops) {
3469 printk(KERN_ERR "kvm: already loaded the other module\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003470 r = -EEXIST;
3471 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003472 }
3473
3474 if (!ops->cpu_has_kvm_support()) {
3475 printk(KERN_ERR "kvm: no hardware support\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003476 r = -EOPNOTSUPP;
3477 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003478 }
3479 if (ops->disabled_by_bios()) {
3480 printk(KERN_ERR "kvm: disabled by bios\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003481 r = -EOPNOTSUPP;
3482 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003483 }
3484
Avi Kivity97db56c2008-01-13 13:23:56 +02003485 r = kvm_mmu_module_init();
3486 if (r)
3487 goto out;
3488
3489 kvm_init_msr_list();
3490
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003491 kvm_x86_ops = ops;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003492 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
Sheng Yang7b523452008-04-25 21:13:50 +08003493 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
3494 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08003495 PT_DIRTY_MASK, PT64_NX_MASK, 0);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003496
Zachary Amsdenb820cc02009-09-29 11:38:34 -10003497 kvm_timer_init();
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003498
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003499 return 0;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003500
3501out:
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003502 return r;
Carsten Otte043405e2007-10-10 17:16:19 +02003503}
Hollis Blanchard8776e512007-10-31 17:24:24 -05003504
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003505void kvm_arch_exit(void)
3506{
Jan Kiszka888d2562009-04-17 19:24:58 +02003507 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
3508 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
3509 CPUFREQ_TRANSITION_NOTIFIER);
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003510 kvm_x86_ops = NULL;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08003511 kvm_mmu_module_exit();
3512}
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08003513
Hollis Blanchard8776e512007-10-31 17:24:24 -05003514int kvm_emulate_halt(struct kvm_vcpu *vcpu)
3515{
3516 ++vcpu->stat.halt_exits;
3517 if (irqchip_in_kernel(vcpu->kvm)) {
Avi Kivitya4535292008-04-13 17:54:35 +03003518 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003519 return 1;
3520 } else {
3521 vcpu->run->exit_reason = KVM_EXIT_HLT;
3522 return 0;
3523 }
3524}
3525EXPORT_SYMBOL_GPL(kvm_emulate_halt);
3526
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003527static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
3528 unsigned long a1)
3529{
3530 if (is_long_mode(vcpu))
3531 return a0;
3532 else
3533 return a0 | ((gpa_t)a1 << 32);
3534}
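/*
 * A 32-bit guest passes a 64-bit gpa split across two hypercall arguments,
 * e.g. (hypothetical values) a0 = 0x12345678, a1 = 0x1 yields 0x112345678;
 * a long-mode guest passes the full address in a0.
 */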
3535
Hollis Blanchard8776e512007-10-31 17:24:24 -05003536int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
3537{
3538 unsigned long nr, a0, a1, a2, a3, ret;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003539 int r = 1;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003540
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003541 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
3542 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
3543 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
3544 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
3545 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003546
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003547 trace_kvm_hypercall(nr, a0, a1, a2, a3);
Feng (Eric) Liu2714d1d2008-04-10 15:31:10 -04003548
Hollis Blanchard8776e512007-10-31 17:24:24 -05003549 if (!is_long_mode(vcpu)) {
3550 nr &= 0xFFFFFFFF;
3551 a0 &= 0xFFFFFFFF;
3552 a1 &= 0xFFFFFFFF;
3553 a2 &= 0xFFFFFFFF;
3554 a3 &= 0xFFFFFFFF;
3555 }
3556
Jan Kiszka07708c42009-08-03 18:43:28 +02003557 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
3558 ret = -KVM_EPERM;
3559 goto out;
3560 }
3561
Hollis Blanchard8776e512007-10-31 17:24:24 -05003562 switch (nr) {
Avi Kivityb93463a2007-10-25 16:52:32 +02003563 case KVM_HC_VAPIC_POLL_IRQ:
3564 ret = 0;
3565 break;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003566 case KVM_HC_MMU_OP:
3567 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
3568 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003569 default:
3570 ret = -KVM_ENOSYS;
3571 break;
3572 }
Jan Kiszka07708c42009-08-03 18:43:28 +02003573out:
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003574 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
Amit Shahf11c3a82008-02-21 01:00:30 +05303575 ++vcpu->stat.hypercalls;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05003576 return r;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003577}
3578EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
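/*
 * Guest-side view of the ABI handled above (illustrative sketch): the
 * hypercall number goes in RAX and up to four arguments in RBX, RCX, RDX
 * and RSI, with the return value coming back in RAX. Using the guest
 * wrapper from asm/kvm_para.h, a vapic poll boils down to
 *
 *	long ret = kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ);
 *
 * which issues vmcall (vmmcall on AMD) and must run at CPL 0, since the
 * check above fails the call with -KVM_EPERM otherwise.
 */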
3579
3580int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
3581{
3582 char instruction[3];
3583 int ret = 0;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003584 unsigned long rip = kvm_rip_read(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003585
Hollis Blanchard8776e512007-10-31 17:24:24 -05003586
3587 /*
 3588	 * Blow out the MMU so that no other VCPU has an active mapping,
 3589	 * ensuring that the updated hypercall appears atomically across all
 3590	 * VCPUs.
3591 */
3592 kvm_mmu_zap_all(vcpu->kvm);
3593
Hollis Blanchard8776e512007-10-31 17:24:24 -05003594 kvm_x86_ops->patch_hypercall(vcpu, instruction);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003595 if (emulator_write_emulated(rip, instruction, 3, vcpu)
Hollis Blanchard8776e512007-10-31 17:24:24 -05003596 != X86EMUL_CONTINUE)
3597 ret = -EFAULT;
3598
Hollis Blanchard8776e512007-10-31 17:24:24 -05003599 return ret;
3600}
3601
3602static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3603{
3604 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3605}
3606
3607void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3608{
3609 struct descriptor_table dt = { limit, base };
3610
3611 kvm_x86_ops->set_gdt(vcpu, &dt);
3612}
3613
3614void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
3615{
3616 struct descriptor_table dt = { limit, base };
3617
3618 kvm_x86_ops->set_idt(vcpu, &dt);
3619}
3620
3621void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
3622 unsigned long *rflags)
3623{
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02003624 kvm_lmsw(vcpu, msw);
Jan Kiszka91586a32009-10-05 13:07:21 +02003625 *rflags = kvm_get_rflags(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003626}
3627
3628unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3629{
Joerg Roedel54e445c2008-04-30 17:56:02 +02003630 unsigned long value;
3631
Hollis Blanchard8776e512007-10-31 17:24:24 -05003632 switch (cr) {
3633 case 0:
Joerg Roedel54e445c2008-04-30 17:56:02 +02003634 value = vcpu->arch.cr0;
3635 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003636 case 2:
Joerg Roedel54e445c2008-04-30 17:56:02 +02003637 value = vcpu->arch.cr2;
3638 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003639 case 3:
Joerg Roedel54e445c2008-04-30 17:56:02 +02003640 value = vcpu->arch.cr3;
3641 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003642 case 4:
Avi Kivityfc78f512009-12-07 12:16:48 +02003643 value = kvm_read_cr4(vcpu);
Joerg Roedel54e445c2008-04-30 17:56:02 +02003644 break;
Joerg Roedel152ff9b2007-12-06 15:46:52 +01003645 case 8:
Joerg Roedel54e445c2008-04-30 17:56:02 +02003646 value = kvm_get_cr8(vcpu);
3647 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003648 default:
Harvey Harrisonb8688d52008-03-03 12:59:56 -08003649 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003650 return 0;
3651 }
Joerg Roedel54e445c2008-04-30 17:56:02 +02003652
3653 return value;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003654}
3655
3656void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3657 unsigned long *rflags)
3658{
3659 switch (cr) {
3660 case 0:
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02003661 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
Jan Kiszka91586a32009-10-05 13:07:21 +02003662 *rflags = kvm_get_rflags(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003663 break;
3664 case 2:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003665 vcpu->arch.cr2 = val;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003666 break;
3667 case 3:
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02003668 kvm_set_cr3(vcpu, val);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003669 break;
3670 case 4:
Avi Kivityfc78f512009-12-07 12:16:48 +02003671 kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
Hollis Blanchard8776e512007-10-31 17:24:24 -05003672 break;
Joerg Roedel152ff9b2007-12-06 15:46:52 +01003673 case 8:
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02003674 kvm_set_cr8(vcpu, val & 0xfUL);
Joerg Roedel152ff9b2007-12-06 15:46:52 +01003675 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003676 default:
Harvey Harrisonb8688d52008-03-03 12:59:56 -08003677 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003678 }
3679}
3680
Dan Kenigsberg07716712007-11-21 17:10:04 +02003681static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
3682{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003683 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
3684 int j, nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02003685
3686 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
3687 /* when no next entry is found, the current entry[i] is reselected */
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08003688 for (j = i + 1; ; j = (j + 1) % nent) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003689 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
Dan Kenigsberg07716712007-11-21 17:10:04 +02003690 if (ej->function == e->function) {
3691 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
3692 return j;
3693 }
3694 }
3695 return 0; /* silence gcc, even though control never reaches here */
3696}
3697
3698/* find an entry with matching function, matching index (if needed), and that
3699 * should be read next (if it's stateful) */
3700static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
3701 u32 function, u32 index)
3702{
3703 if (e->function != function)
3704 return 0;
3705 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
3706 return 0;
3707 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
Amit Shah19355472009-01-14 16:56:00 +00003708 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
Dan Kenigsberg07716712007-11-21 17:10:04 +02003709 return 0;
3710 return 1;
3711}
3712
Alexander Grafd8017472008-11-25 20:17:11 +01003713struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
3714 u32 function, u32 index)
Hollis Blanchard8776e512007-10-31 17:24:24 -05003715{
3716 int i;
Alexander Grafd8017472008-11-25 20:17:11 +01003717 struct kvm_cpuid_entry2 *best = NULL;
Hollis Blanchard8776e512007-10-31 17:24:24 -05003718
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003719 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
Alexander Grafd8017472008-11-25 20:17:11 +01003720 struct kvm_cpuid_entry2 *e;
3721
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003722 e = &vcpu->arch.cpuid_entries[i];
Dan Kenigsberg07716712007-11-21 17:10:04 +02003723 if (is_matching_cpuid_entry(e, function, index)) {
3724 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
3725 move_to_next_stateful_cpuid_entry(vcpu, i);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003726 best = e;
3727 break;
3728 }
3729 /*
3730 * Both basic or both extended?
3731 */
3732 if (((e->function ^ function) & 0x80000000) == 0)
3733 if (!best || e->function > best->function)
3734 best = e;
3735 }
Alexander Grafd8017472008-11-25 20:17:11 +01003736 return best;
3737}
Sheng Yang0e851882009-12-18 16:48:46 +08003738EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
Alexander Grafd8017472008-11-25 20:17:11 +01003739
Dong, Eddie82725b22009-03-30 16:21:08 +08003740int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
3741{
3742 struct kvm_cpuid_entry2 *best;
3743
3744 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
3745 if (best)
3746 return best->eax & 0xff;
3747 return 36;
3748}
3749
Alexander Grafd8017472008-11-25 20:17:11 +01003750void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3751{
3752 u32 function, index;
3753 struct kvm_cpuid_entry2 *best;
3754
3755 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3756 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3757 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3758 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3759 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3760 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3761 best = kvm_find_cpuid_entry(vcpu, function, index);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003762 if (best) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003763 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
3764 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
3765 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
3766 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
Hollis Blanchard8776e512007-10-31 17:24:24 -05003767 }
Hollis Blanchard8776e512007-10-31 17:24:24 -05003768 kvm_x86_ops->skip_emulated_instruction(vcpu);
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003769 trace_kvm_cpuid(function,
3770 kvm_register_read(vcpu, VCPU_REGS_RAX),
3771 kvm_register_read(vcpu, VCPU_REGS_RBX),
3772 kvm_register_read(vcpu, VCPU_REGS_RCX),
3773 kvm_register_read(vcpu, VCPU_REGS_RDX));
Hollis Blanchard8776e512007-10-31 17:24:24 -05003774}
3775EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
Hollis Blanchardd0752062007-10-31 17:24:25 -05003776
3777/*
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003778 * Check if userspace requested an interrupt window, and that the
3779 * interrupt window is open.
3780 *
3781 * No need to exit to userspace if we already have an interrupt queued.
3782 */
Avi Kivity851ba692009-08-24 11:10:17 +03003783static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003784{
Gleb Natapov80618232009-04-21 17:44:56 +03003785 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
Avi Kivity851ba692009-08-24 11:10:17 +03003786 vcpu->run->request_interrupt_window &&
Gleb Natapov5df56642009-04-21 17:44:59 +03003787 kvm_arch_interrupt_allowed(vcpu));
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003788}
3789
Avi Kivity851ba692009-08-24 11:10:17 +03003790static void post_kvm_run_save(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003791{
Avi Kivity851ba692009-08-24 11:10:17 +03003792 struct kvm_run *kvm_run = vcpu->run;
3793
Jan Kiszka91586a32009-10-05 13:07:21 +02003794 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02003795 kvm_run->cr8 = kvm_get_cr8(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003796 kvm_run->apic_base = kvm_get_apic_base(vcpu);
Jan Kiszka45312202008-12-11 16:54:54 +01003797 if (irqchip_in_kernel(vcpu->kvm))
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003798 kvm_run->ready_for_interrupt_injection = 1;
Jan Kiszka45312202008-12-11 16:54:54 +01003799 else
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003800 kvm_run->ready_for_interrupt_injection =
Gleb Natapovfa9726b2009-05-11 13:35:47 +03003801 kvm_arch_interrupt_allowed(vcpu) &&
3802 !kvm_cpu_has_interrupt(vcpu) &&
3803 !kvm_event_needs_reinjection(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003804}
3805
Avi Kivityb93463a2007-10-25 16:52:32 +02003806static void vapic_enter(struct kvm_vcpu *vcpu)
3807{
3808 struct kvm_lapic *apic = vcpu->arch.apic;
3809 struct page *page;
3810
3811 if (!apic || !apic->vapic_addr)
3812 return;
3813
3814 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02003815
3816 vcpu->arch.apic->vapic_page = page;
Avi Kivityb93463a2007-10-25 16:52:32 +02003817}
3818
3819static void vapic_exit(struct kvm_vcpu *vcpu)
3820{
3821 struct kvm_lapic *apic = vcpu->arch.apic;
3822
3823 if (!apic || !apic->vapic_addr)
3824 return;
3825
Marcelo Tosattif8b78fa2008-06-23 12:04:25 -03003826 down_read(&vcpu->kvm->slots_lock);
Avi Kivityb93463a2007-10-25 16:52:32 +02003827 kvm_release_page_dirty(apic->vapic_page);
3828 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Marcelo Tosattif8b78fa2008-06-23 12:04:25 -03003829 up_read(&vcpu->kvm->slots_lock);
Avi Kivityb93463a2007-10-25 16:52:32 +02003830}
3831
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003832static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3833{
3834 int max_irr, tpr;
3835
3836 if (!kvm_x86_ops->update_cr8_intercept)
3837 return;
3838
Avi Kivity88c808f2009-08-17 22:49:40 +03003839 if (!vcpu->arch.apic)
3840 return;
3841
Gleb Natapov8db3baa2009-05-11 13:35:54 +03003842 if (!vcpu->arch.apic->vapic_addr)
3843 max_irr = kvm_lapic_find_highest_irr(vcpu);
3844 else
3845 max_irr = -1;
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003846
3847 if (max_irr != -1)
3848 max_irr >>= 4;
3849
3850 tpr = kvm_lapic_get_cr8(vcpu);
3851
3852 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3853}
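/*
 * Example of the priority math above (hypothetical values): with vector
 * 0x51 pending, max_irr >> 4 gives priority class 5; a guest TPR (CR8) of
 * 5 or higher blocks delivery, and the intercept is programmed so that
 * lowering CR8 below the pending class exits to the host, which can then
 * inject the interrupt.
 */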
3854
Avi Kivity851ba692009-08-24 11:10:17 +03003855static void inject_pending_event(struct kvm_vcpu *vcpu)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003856{
3857 /* try to reinject previous events if any */
Gleb Natapovb59bb7b2009-07-09 15:33:51 +03003858 if (vcpu->arch.exception.pending) {
3859 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
3860 vcpu->arch.exception.has_error_code,
3861 vcpu->arch.exception.error_code);
3862 return;
3863 }
3864
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003865 if (vcpu->arch.nmi_injected) {
3866 kvm_x86_ops->set_nmi(vcpu);
3867 return;
3868 }
3869
3870 if (vcpu->arch.interrupt.pending) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003871 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003872 return;
3873 }
3874
3875 /* try to inject new event if pending */
3876 if (vcpu->arch.nmi_pending) {
3877 if (kvm_x86_ops->nmi_allowed(vcpu)) {
3878 vcpu->arch.nmi_pending = false;
3879 vcpu->arch.nmi_injected = true;
3880 kvm_x86_ops->set_nmi(vcpu);
3881 }
3882 } else if (kvm_cpu_has_interrupt(vcpu)) {
3883 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003884 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
3885 false);
3886 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003887 }
3888 }
3889}
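/*
 * The ordering above gives re-injection of previously queued events
 * (exception, then NMI, then external interrupt) priority over new ones,
 * and a pending exception always wins. A new NMI or IRQ is only injected
 * when the respective *_allowed() callback says the window is open;
 * otherwise vcpu_enter_guest() below requests an NMI/IRQ window exit.
 */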
3890
Avi Kivity851ba692009-08-24 11:10:17 +03003891static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003892{
3893 int r;
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03003894 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
Avi Kivity851ba692009-08-24 11:10:17 +03003895 vcpu->run->request_interrupt_window;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003896
Marcelo Tosatti2e53d632008-02-20 14:47:24 -05003897 if (vcpu->requests)
3898 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
3899 kvm_mmu_unload(vcpu);
3900
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003901 r = kvm_mmu_reload(vcpu);
3902 if (unlikely(r))
3903 goto out;
3904
Avi Kivity2f52d582008-01-16 12:49:30 +02003905 if (vcpu->requests) {
3906 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
Marcelo Tosatti2f599712008-05-27 12:10:20 -03003907 __kvm_migrate_timers(vcpu);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01003908 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3909 kvm_write_guest_time(vcpu);
Marcelo Tosatti4731d4c2008-09-23 13:18:39 -03003910 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
3911 kvm_mmu_sync_roots(vcpu);
Marcelo Tosattid4acf7e2008-06-06 16:37:35 -03003912 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
3913 kvm_x86_ops->tlb_flush(vcpu);
Avi Kivityb93463a2007-10-25 16:52:32 +02003914 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
3915 &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03003916 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
Avi Kivityb93463a2007-10-25 16:52:32 +02003917 r = 0;
3918 goto out;
3919 }
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01003920 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03003921 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01003922 r = 0;
3923 goto out;
3924 }
Avi Kivity2f52d582008-01-16 12:49:30 +02003925 }
Avi Kivityb93463a2007-10-25 16:52:32 +02003926
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003927 preempt_disable();
3928
3929 kvm_x86_ops->prepare_guest_switch(vcpu);
3930 kvm_load_guest_fpu(vcpu);
3931
3932 local_irq_disable();
3933
Marcelo Tosatti32f88402009-05-07 17:55:12 -03003934 clear_bit(KVM_REQ_KICK, &vcpu->requests);
3935 smp_mb__after_clear_bit();
3936
Marcelo Tosattid7690172008-09-08 15:23:48 -03003937 if (vcpu->requests || need_resched() || signal_pending(current)) {
Gleb Natapovc7f0f242009-07-07 15:27:32 +03003938 set_bit(KVM_REQ_KICK, &vcpu->requests);
Avi Kivity6c142802008-01-15 18:27:32 +02003939 local_irq_enable();
3940 preempt_enable();
3941 r = 1;
3942 goto out;
3943 }
3944
Avi Kivity851ba692009-08-24 11:10:17 +03003945 inject_pending_event(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003946
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03003947 /* enable NMI/IRQ window open exits if needed */
3948 if (vcpu->arch.nmi_pending)
3949 kvm_x86_ops->enable_nmi_window(vcpu);
3950 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
3951 kvm_x86_ops->enable_irq_window(vcpu);
3952
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003953 if (kvm_lapic_enabled(vcpu)) {
Gleb Natapov8db3baa2009-05-11 13:35:54 +03003954 update_cr8_intercept(vcpu);
3955 kvm_lapic_sync_to_vapic(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003956 }
Avi Kivityb93463a2007-10-25 16:52:32 +02003957
Marcelo Tosatti3200f402008-03-29 20:17:59 -03003958 up_read(&vcpu->kvm->slots_lock);
3959
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003960 kvm_guest_enter();
3961
Jan Kiszka42dbaa52008-12-15 13:52:10 +01003962 if (unlikely(vcpu->arch.switch_db_regs)) {
Jan Kiszka42dbaa52008-12-15 13:52:10 +01003963 set_debugreg(0, 7);
3964 set_debugreg(vcpu->arch.eff_db[0], 0);
3965 set_debugreg(vcpu->arch.eff_db[1], 1);
3966 set_debugreg(vcpu->arch.eff_db[2], 2);
3967 set_debugreg(vcpu->arch.eff_db[3], 3);
3968 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003969
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003970 trace_kvm_entry(vcpu->vcpu_id);
Avi Kivity851ba692009-08-24 11:10:17 +03003971 kvm_x86_ops->run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003972
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02003973 /*
3974 * If the guest has used debug registers, at least dr7
3975 * will be disabled while returning to the host.
3976 * If we don't have active breakpoints in the host, we don't
3977 * care about the messed up debug address registers. But if
3978 * we have some of them active, restore the old state.
3979 */
Frederic Weisbecker59d8eb52009-11-10 11:03:12 +01003980 if (hw_breakpoint_active())
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02003981 hw_breakpoint_restore();
Jan Kiszka42dbaa52008-12-15 13:52:10 +01003982
Marcelo Tosatti32f88402009-05-07 17:55:12 -03003983 set_bit(KVM_REQ_KICK, &vcpu->requests);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05003984 local_irq_enable();
3985
3986 ++vcpu->stat.exits;
3987
3988 /*
3989 * We must have an instruction between local_irq_enable() and
3990 * kvm_guest_exit(), so the timer interrupt isn't delayed by
3991 * the interrupt shadow. The stat.exits increment will do nicely.
3992 * But we need to prevent reordering, hence this barrier():
3993 */
3994 barrier();
3995
3996 kvm_guest_exit();
3997
3998 preempt_enable();
3999
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004000 down_read(&vcpu->kvm->slots_lock);
4001
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004002 /*
4003 * Profile KVM exit RIPs:
4004 */
4005 if (unlikely(prof_on == KVM_PROFILING)) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004006 unsigned long rip = kvm_rip_read(vcpu);
4007 profile_hit(KVM_PROFILING, (void *)rip);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004008 }
4009
Avi Kivity298101d2007-11-25 13:41:11 +02004010
Avi Kivityb93463a2007-10-25 16:52:32 +02004011 kvm_lapic_sync_from_vapic(vcpu);
4012
Avi Kivity851ba692009-08-24 11:10:17 +03004013 r = kvm_x86_ops->handle_exit(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004014out:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004015 return r;
4016}
4017
Gleb Natapov09cec752009-03-23 15:11:44 +02004018
Avi Kivity851ba692009-08-24 11:10:17 +03004019static int __vcpu_run(struct kvm_vcpu *vcpu)
Marcelo Tosattid7690172008-09-08 15:23:48 -03004020{
4021 int r;
4022
4023 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
Jan Kiszka1b10bf32008-09-30 10:41:06 +02004024 pr_debug("vcpu %d received sipi with vector # %x\n",
4025 vcpu->vcpu_id, vcpu->arch.sipi_vector);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004026 kvm_lapic_reset(vcpu);
Gleb Natapov5f179282008-10-07 15:42:33 +02004027 r = kvm_arch_vcpu_reset(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004028 if (r)
4029 return r;
4030 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004031 }
4032
Marcelo Tosattid7690172008-09-08 15:23:48 -03004033 down_read(&vcpu->kvm->slots_lock);
4034 vapic_enter(vcpu);
4035
4036 r = 1;
4037 while (r > 0) {
Gleb Natapovaf2152f2008-09-22 14:28:53 +03004038 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
Avi Kivity851ba692009-08-24 11:10:17 +03004039 r = vcpu_enter_guest(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004040 else {
4041 up_read(&vcpu->kvm->slots_lock);
4042 kvm_vcpu_block(vcpu);
4043 down_read(&vcpu->kvm->slots_lock);
4044 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
Gleb Natapov09cec752009-03-23 15:11:44 +02004045 {
4046 switch(vcpu->arch.mp_state) {
4047 case KVM_MP_STATE_HALTED:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004048 vcpu->arch.mp_state =
Gleb Natapov09cec752009-03-23 15:11:44 +02004049 KVM_MP_STATE_RUNNABLE;
4050 case KVM_MP_STATE_RUNNABLE:
4051 break;
4052 case KVM_MP_STATE_SIPI_RECEIVED:
4053 default:
4054 r = -EINTR;
4055 break;
4056 }
4057 }
Marcelo Tosattid7690172008-09-08 15:23:48 -03004058 }
4059
Gleb Natapov09cec752009-03-23 15:11:44 +02004060 if (r <= 0)
4061 break;
4062
4063 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4064 if (kvm_cpu_has_pending_timer(vcpu))
4065 kvm_inject_pending_timer_irqs(vcpu);
4066
Avi Kivity851ba692009-08-24 11:10:17 +03004067 if (dm_request_for_irq_injection(vcpu)) {
Gleb Natapov09cec752009-03-23 15:11:44 +02004068 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004069 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004070 ++vcpu->stat.request_irq_exits;
4071 }
4072 if (signal_pending(current)) {
4073 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004074 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004075 ++vcpu->stat.signal_exits;
4076 }
4077 if (need_resched()) {
4078 up_read(&vcpu->kvm->slots_lock);
4079 kvm_resched(vcpu);
4080 down_read(&vcpu->kvm->slots_lock);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004081 }
4082 }
4083
4084 up_read(&vcpu->kvm->slots_lock);
Avi Kivity851ba692009-08-24 11:10:17 +03004085 post_kvm_run_save(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004086
Avi Kivityb93463a2007-10-25 16:52:32 +02004087 vapic_exit(vcpu);
4088
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004089 return r;
4090}
4091
4092int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4093{
4094 int r;
4095 sigset_t sigsaved;
4096
4097 vcpu_load(vcpu);
4098
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004099 if (vcpu->sigset_active)
4100 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4101
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004102 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4103 kvm_vcpu_block(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004104 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004105 r = -EAGAIN;
4106 goto out;
4107 }
4108
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004109 /* re-sync apic's tpr */
4110 if (!irqchip_in_kernel(vcpu->kvm))
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004111 kvm_set_cr8(vcpu, kvm_run->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004112
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004113 if (vcpu->arch.pio.cur_count) {
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004114 r = complete_pio(vcpu);
4115 if (r)
4116 goto out;
4117 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004118 if (vcpu->mmio_needed) {
4119 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4120 vcpu->mmio_read_completed = 1;
4121 vcpu->mmio_needed = 0;
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004122
4123 down_read(&vcpu->kvm->slots_lock);
Avi Kivity851ba692009-08-24 11:10:17 +03004124 r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
Sheng Yang571008d2008-01-02 14:49:22 +08004125 EMULTYPE_NO_DECODE);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004126 up_read(&vcpu->kvm->slots_lock);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004127 if (r == EMULATE_DO_MMIO) {
4128 /*
4129 * Read-modify-write. Back to userspace.
4130 */
4131 r = 0;
4132 goto out;
4133 }
4134 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004135 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4136 kvm_register_write(vcpu, VCPU_REGS_RAX,
4137 kvm_run->hypercall.ret);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004138
Avi Kivity851ba692009-08-24 11:10:17 +03004139 r = __vcpu_run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004140
4141out:
4142 if (vcpu->sigset_active)
4143 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4144
4145 vcpu_put(vcpu);
4146 return r;
4147}
4148
4149int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4150{
4151 vcpu_load(vcpu);
4152
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004153 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4154 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4155 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4156 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4157 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4158 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4159 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4160 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004161#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004162 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
4163 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
4164 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
4165 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
4166 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
4167 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
4168 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
4169 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004170#endif
4171
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004172 regs->rip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004173 regs->rflags = kvm_get_rflags(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004174
4175 vcpu_put(vcpu);
4176
4177 return 0;
4178}
4179
4180int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4181{
4182 vcpu_load(vcpu);
4183
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004184 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4185 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4186 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
4187 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
4188 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
4189 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
4190 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
4191 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004192#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004193 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
4194 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
4195 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
4196 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
4197 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
4198 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
4199 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
4200 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004201#endif
4202
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004203 kvm_rip_write(vcpu, regs->rip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004204 kvm_set_rflags(vcpu, regs->rflags);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004205
Jan Kiszkab4f14ab2008-04-30 17:59:04 +02004206 vcpu->arch.exception.pending = false;
4207
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004208 vcpu_put(vcpu);
4209
4210 return 0;
4211}
4212
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004213void kvm_get_segment(struct kvm_vcpu *vcpu,
4214 struct kvm_segment *var, int seg)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004215{
Harvey Harrison14af3f32008-02-19 10:25:50 -08004216 kvm_x86_ops->get_segment(vcpu, var, seg);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004217}
4218
4219void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
4220{
4221 struct kvm_segment cs;
4222
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004223 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004224 *db = cs.db;
4225 *l = cs.l;
4226}
4227EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
4228
4229int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4230 struct kvm_sregs *sregs)
4231{
4232 struct descriptor_table dt;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004233
4234 vcpu_load(vcpu);
4235
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004236 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4237 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4238 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4239 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4240 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4241 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004242
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004243 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4244 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004245
4246 kvm_x86_ops->get_idt(vcpu, &dt);
4247 sregs->idt.limit = dt.limit;
4248 sregs->idt.base = dt.base;
4249 kvm_x86_ops->get_gdt(vcpu, &dt);
4250 sregs->gdt.limit = dt.limit;
4251 sregs->gdt.base = dt.base;
4252
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004253 sregs->cr0 = vcpu->arch.cr0;
4254 sregs->cr2 = vcpu->arch.cr2;
4255 sregs->cr3 = vcpu->arch.cr3;
Avi Kivityfc78f512009-12-07 12:16:48 +02004256 sregs->cr4 = kvm_read_cr4(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004257 sregs->cr8 = kvm_get_cr8(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004258 sregs->efer = vcpu->arch.shadow_efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004259 sregs->apic_base = kvm_get_apic_base(vcpu);
4260
Gleb Natapov923c61b2009-05-11 13:35:48 +03004261 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004262
Gleb Natapov36752c92009-05-11 13:35:53 +03004263 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
Gleb Natapov14d0bc12009-04-21 17:45:11 +03004264 set_bit(vcpu->arch.interrupt.nr,
4265 (unsigned long *)sregs->interrupt_bitmap);
Gleb Natapov16d7a192009-04-21 17:45:10 +03004266
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004267 vcpu_put(vcpu);
4268
4269 return 0;
4270}
4271
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004272int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4273 struct kvm_mp_state *mp_state)
4274{
4275 vcpu_load(vcpu);
4276 mp_state->mp_state = vcpu->arch.mp_state;
4277 vcpu_put(vcpu);
4278 return 0;
4279}
4280
4281int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4282 struct kvm_mp_state *mp_state)
4283{
4284 vcpu_load(vcpu);
4285 vcpu->arch.mp_state = mp_state->mp_state;
4286 vcpu_put(vcpu);
4287 return 0;
4288}
4289
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004290static void kvm_set_segment(struct kvm_vcpu *vcpu,
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004291 struct kvm_segment *var, int seg)
4292{
Harvey Harrison14af3f32008-02-19 10:25:50 -08004293 kvm_x86_ops->set_segment(vcpu, var, seg);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004294}
4295
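/*
 * Convert a raw GDT/LDT descriptor entry into the kvm_segment layout,
 * expanding the limit for page-granular (G=1) segments.
 */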
Izik Eidus37817f22008-03-24 23:14:53 +02004296static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
4297 struct kvm_segment *kvm_desct)
4298{
Akinobu Mita46a359e2009-07-18 23:58:32 +09004299 kvm_desct->base = get_desc_base(seg_desc);
4300 kvm_desct->limit = get_desc_limit(seg_desc);
Marcelo Tosattic93cd3a2008-07-19 19:08:07 -03004301 if (seg_desc->g) {
4302 kvm_desct->limit <<= 12;
4303 kvm_desct->limit |= 0xfff;
4304 }
Izik Eidus37817f22008-03-24 23:14:53 +02004305 kvm_desct->selector = selector;
4306 kvm_desct->type = seg_desc->type;
4307 kvm_desct->present = seg_desc->p;
4308 kvm_desct->dpl = seg_desc->dpl;
4309 kvm_desct->db = seg_desc->d;
4310 kvm_desct->s = seg_desc->s;
4311 kvm_desct->l = seg_desc->l;
4312 kvm_desct->g = seg_desc->g;
4313 kvm_desct->avl = seg_desc->avl;
4314 if (!selector)
4315 kvm_desct->unusable = 1;
4316 else
4317 kvm_desct->unusable = 0;
4318 kvm_desct->padding = 0;
4319}
4320
Amit Shahb8222ad2008-10-22 16:39:47 +05304321static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
4322 u16 selector,
4323 struct descriptor_table *dtable)
Izik Eidus37817f22008-03-24 23:14:53 +02004324{
4325 if (selector & 1 << 2) {
4326 struct kvm_segment kvm_seg;
4327
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004328 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004329
4330 if (kvm_seg.unusable)
4331 dtable->limit = 0;
4332 else
4333 dtable->limit = kvm_seg.limit;
4334 dtable->base = kvm_seg.base;
4335 }
4336 else
4337 kvm_x86_ops->get_gdt(vcpu, dtable);
4338}
4339
4340/* allowed just for 8-byte segment descriptors */
4341static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4342 struct desc_struct *seg_desc)
4343{
4344 struct descriptor_table dtable;
4345 u16 index = selector >> 3;
4346
Amit Shahb8222ad2008-10-22 16:39:47 +05304347 get_segment_descriptor_dtable(vcpu, selector, &dtable);
Izik Eidus37817f22008-03-24 23:14:53 +02004348
4349 if (dtable.limit < index * 8 + 7) {
4350 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
4351 return 1;
4352 }
Mikhail Ershovd9048d32009-08-19 14:08:07 +04004353 return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
Izik Eidus37817f22008-03-24 23:14:53 +02004354}
4355
4356/* allowed just for 8-byte segment descriptors */
4357static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4358 struct desc_struct *seg_desc)
4359{
4360 struct descriptor_table dtable;
4361 u16 index = selector >> 3;
4362
Amit Shahb8222ad2008-10-22 16:39:47 +05304363 get_segment_descriptor_dtable(vcpu, selector, &dtable);
Izik Eidus37817f22008-03-24 23:14:53 +02004364
4365 if (dtable.limit < index * 8 + 7)
4366 return 1;
Mikhail Ershovd9048d32009-08-19 14:08:07 +04004367 return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
Izik Eidus37817f22008-03-24 23:14:53 +02004368}
4369
Gleb Natapovabb39112009-10-25 17:42:02 +02004370static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
Izik Eidus37817f22008-03-24 23:14:53 +02004371 struct desc_struct *seg_desc)
4372{
Akinobu Mita46a359e2009-07-18 23:58:32 +09004373 u32 base_addr = get_desc_base(seg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02004374
Marcelo Tosatti98899aa2008-07-16 19:07:10 -03004375 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
Izik Eidus37817f22008-03-24 23:14:53 +02004376}
4377
Izik Eidus37817f22008-03-24 23:14:53 +02004378static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
4379{
4380 struct kvm_segment kvm_seg;
4381
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004382 kvm_get_segment(vcpu, &kvm_seg, seg);
Izik Eidus37817f22008-03-24 23:14:53 +02004383 return kvm_seg.selector;
4384}
4385
4386static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
4387 u16 selector,
4388 struct kvm_segment *kvm_seg)
4389{
4390 struct desc_struct seg_desc;
4391
4392 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
4393 return 1;
4394 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
4395 return 0;
4396}
4397
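/*
 * Real-mode style segment load: the base is simply selector << 4,
 * with a 64K limit and byte granularity.
 */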
Harvey Harrison2259e3a2008-08-22 13:29:17 -07004398static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
Avi Kivityf4bbd9a2008-08-20 15:51:42 +03004399{
4400 struct kvm_segment segvar = {
4401 .base = selector << 4,
4402 .limit = 0xffff,
4403 .selector = selector,
4404 .type = 3,
4405 .present = 1,
4406 .dpl = 3,
4407 .db = 0,
4408 .s = 1,
4409 .l = 0,
4410 .g = 0,
4411 .avl = 0,
4412 .unusable = 0,
4413 };
4414 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
4415 return 0;
4416}
4417
Anthony Liguoric0c7c042009-08-11 15:57:59 -05004418static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4419{
4420 return (seg != VCPU_SREG_LDTR) &&
4421 (seg != VCPU_SREG_TR) &&
Jan Kiszka91586a32009-10-05 13:07:21 +02004422 (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
Anthony Liguoric0c7c042009-08-11 15:57:59 -05004423}
4424
Marcelo Tosatticb84b552009-11-11 17:29:49 -02004425static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
4426 u16 selector)
4427{
4428 /* NULL selector is not valid for CS and SS */
4429 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
4430 if (!selector)
4431 kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
4432}
4433
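/*
 * Protected-mode segment load: fall back to a real-mode style load in
 * vm86 mode or when protection is disabled, otherwise fetch the
 * descriptor from the GDT/LDT, check it and install it.
 */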
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004434int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4435 int type_bits, int seg)
Izik Eidus37817f22008-03-24 23:14:53 +02004436{
4437 struct kvm_segment kvm_seg;
4438
Anthony Liguoric0c7c042009-08-11 15:57:59 -05004439 if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
Avi Kivityf4bbd9a2008-08-20 15:51:42 +03004440 return kvm_load_realmode_segment(vcpu, selector, seg);
Izik Eidus37817f22008-03-24 23:14:53 +02004441 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
4442 return 1;
Marcelo Tosatticb84b552009-11-11 17:29:49 -02004443
4444 kvm_check_segment_descriptor(vcpu, seg, selector);
Izik Eidus37817f22008-03-24 23:14:53 +02004445 kvm_seg.type |= type_bits;
4446
4447 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
4448 seg != VCPU_SREG_LDTR)
4449 if (!kvm_seg.s)
4450 kvm_seg.unusable = 1;
4451
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004452 kvm_set_segment(vcpu, &kvm_seg, seg);
Izik Eidus37817f22008-03-24 23:14:53 +02004453 return 0;
4454}
4455
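/*
 * Snapshot the vcpu state into a 32-bit TSS image when emulating a
 * task switch.
 */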
4456static void save_state_to_tss32(struct kvm_vcpu *vcpu,
4457 struct tss_segment_32 *tss)
4458{
4459 tss->cr3 = vcpu->arch.cr3;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004460 tss->eip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004461 tss->eflags = kvm_get_rflags(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004462 tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4463 tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4464 tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4465 tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4466 tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4467 tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4468 tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4469 tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
Izik Eidus37817f22008-03-24 23:14:53 +02004470 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4471 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4472 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4473 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4474 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
4475 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
4476 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004477}
4478
4479static int load_state_from_tss32(struct kvm_vcpu *vcpu,
4480 struct tss_segment_32 *tss)
4481{
4482 kvm_set_cr3(vcpu, tss->cr3);
4483
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004484 kvm_rip_write(vcpu, tss->eip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004485 kvm_set_rflags(vcpu, tss->eflags | 2);
Izik Eidus37817f22008-03-24 23:14:53 +02004486
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004487 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
4488 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
4489 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
4490 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
4491 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
4492 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
4493 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
4494 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
Izik Eidus37817f22008-03-24 23:14:53 +02004495
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004496 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
Izik Eidus37817f22008-03-24 23:14:53 +02004497 return 1;
4498
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004499 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
Izik Eidus37817f22008-03-24 23:14:53 +02004500 return 1;
4501
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004502 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
Izik Eidus37817f22008-03-24 23:14:53 +02004503 return 1;
4504
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004505 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
Izik Eidus37817f22008-03-24 23:14:53 +02004506 return 1;
4507
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004508 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
Izik Eidus37817f22008-03-24 23:14:53 +02004509 return 1;
4510
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004511 if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
Izik Eidus37817f22008-03-24 23:14:53 +02004512 return 1;
4513
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004514 if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
Izik Eidus37817f22008-03-24 23:14:53 +02004515 return 1;
4516 return 0;
4517}
4518
4519static void save_state_to_tss16(struct kvm_vcpu *vcpu,
4520 struct tss_segment_16 *tss)
4521{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004522 tss->ip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004523 tss->flag = kvm_get_rflags(vcpu);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004524 tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4525 tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4526 tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4527 tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4528 tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4529 tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4530 tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
4531 tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
Izik Eidus37817f22008-03-24 23:14:53 +02004532
4533 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
4534 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
4535 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
4536 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
4537 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
Izik Eidus37817f22008-03-24 23:14:53 +02004538}
4539
4540static int load_state_from_tss16(struct kvm_vcpu *vcpu,
4541 struct tss_segment_16 *tss)
4542{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004543 kvm_rip_write(vcpu, tss->ip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004544 kvm_set_rflags(vcpu, tss->flag | 2);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004545 kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
4546 kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
4547 kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
4548 kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
4549 kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
4550 kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
4551 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
4552 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
Izik Eidus37817f22008-03-24 23:14:53 +02004553
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004554 if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
Izik Eidus37817f22008-03-24 23:14:53 +02004555 return 1;
4556
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004557 if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
Izik Eidus37817f22008-03-24 23:14:53 +02004558 return 1;
4559
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004560 if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
Izik Eidus37817f22008-03-24 23:14:53 +02004561 return 1;
4562
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004563 if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
Izik Eidus37817f22008-03-24 23:14:53 +02004564 return 1;
4565
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004566 if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
Izik Eidus37817f22008-03-24 23:14:53 +02004567 return 1;
4568 return 0;
4569}
4570
Harvey Harrison8b2cf732008-04-27 12:14:13 -07004571static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
Gleb Natapovb237ac32009-03-30 16:03:24 +03004572 u16 old_tss_sel, u32 old_tss_base,
4573 struct desc_struct *nseg_desc)
Izik Eidus37817f22008-03-24 23:14:53 +02004574{
4575 struct tss_segment_16 tss_segment_16;
4576 int ret = 0;
4577
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004578 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4579 sizeof tss_segment_16))
Izik Eidus37817f22008-03-24 23:14:53 +02004580 goto out;
4581
4582 save_state_to_tss16(vcpu, &tss_segment_16);
Izik Eidus37817f22008-03-24 23:14:53 +02004583
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004584 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
4585 sizeof tss_segment_16))
Izik Eidus37817f22008-03-24 23:14:53 +02004586 goto out;
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004587
4588 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4589 &tss_segment_16, sizeof tss_segment_16))
4590 goto out;
4591
Gleb Natapovb237ac32009-03-30 16:03:24 +03004592 if (old_tss_sel != 0xffff) {
4593 tss_segment_16.prev_task_link = old_tss_sel;
4594
4595 if (kvm_write_guest(vcpu->kvm,
4596 get_tss_base_addr(vcpu, nseg_desc),
4597 &tss_segment_16.prev_task_link,
4598 sizeof tss_segment_16.prev_task_link))
4599 goto out;
4600 }
4601
Izik Eidus37817f22008-03-24 23:14:53 +02004602 if (load_state_from_tss16(vcpu, &tss_segment_16))
4603 goto out;
4604
4605 ret = 1;
4606out:
4607 return ret;
4608}
4609
Harvey Harrison8b2cf732008-04-27 12:14:13 -07004610static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
Gleb Natapovb237ac32009-03-30 16:03:24 +03004611 u16 old_tss_sel, u32 old_tss_base,
Izik Eidus37817f22008-03-24 23:14:53 +02004612 struct desc_struct *nseg_desc)
4613{
4614 struct tss_segment_32 tss_segment_32;
4615 int ret = 0;
4616
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004617 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4618 sizeof tss_segment_32))
Izik Eidus37817f22008-03-24 23:14:53 +02004619 goto out;
4620
4621 save_state_to_tss32(vcpu, &tss_segment_32);
Izik Eidus37817f22008-03-24 23:14:53 +02004622
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004623 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
4624 sizeof tss_segment_32))
Izik Eidus37817f22008-03-24 23:14:53 +02004625 goto out;
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004626
4627 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
4628 &tss_segment_32, sizeof tss_segment_32))
4629 goto out;
4630
Gleb Natapovb237ac32009-03-30 16:03:24 +03004631 if (old_tss_sel != 0xffff) {
4632 tss_segment_32.prev_task_link = old_tss_sel;
4633
4634 if (kvm_write_guest(vcpu->kvm,
4635 get_tss_base_addr(vcpu, nseg_desc),
4636 &tss_segment_32.prev_task_link,
4637 sizeof tss_segment_32.prev_task_link))
4638 goto out;
4639 }
4640
Izik Eidus37817f22008-03-24 23:14:53 +02004641 if (load_state_from_tss32(vcpu, &tss_segment_32))
4642 goto out;
4643
4644 ret = 1;
4645out:
4646 return ret;
4647}
4648
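/*
 * Emulate a hardware task switch: validate the target TSS descriptor,
 * save state into the old TSS, load state from the new one, and update
 * the busy and NT bits as the switch reason (IRET, JMP, CALL, gate)
 * requires.
 */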
4649int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4650{
4651 struct kvm_segment tr_seg;
4652 struct desc_struct cseg_desc;
4653 struct desc_struct nseg_desc;
4654 int ret = 0;
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004655 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
4656 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
Izik Eidus37817f22008-03-24 23:14:53 +02004657
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004658 old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
Izik Eidus37817f22008-03-24 23:14:53 +02004659
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004660 /* FIXME: Handle errors. Failure to read either TSS or their
4661 * descriptors should generate a pagefault.
4662 */
Izik Eidus37817f22008-03-24 23:14:53 +02004663 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
4664 goto out;
4665
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004666 if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
Izik Eidus37817f22008-03-24 23:14:53 +02004667 goto out;
4668
Izik Eidus37817f22008-03-24 23:14:53 +02004669 if (reason != TASK_SWITCH_IRET) {
4670 int cpl;
4671
4672 cpl = kvm_x86_ops->get_cpl(vcpu);
4673 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
4674 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4675 return 1;
4676 }
4677 }
4678
Akinobu Mita46a359e2009-07-18 23:58:32 +09004679 if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
Izik Eidus37817f22008-03-24 23:14:53 +02004680 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
4681 return 1;
4682 }
4683
4684 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
Izik Eidus3fe913e2008-04-28 18:23:52 +03004685		cseg_desc.type &= ~(1 << 1); /* clear the B flag */
Marcelo Tosatti34198bf2008-07-16 19:07:11 -03004686 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02004687 }
4688
4689 if (reason == TASK_SWITCH_IRET) {
Jan Kiszka91586a32009-10-05 13:07:21 +02004690 u32 eflags = kvm_get_rflags(vcpu);
4691 kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
Izik Eidus37817f22008-03-24 23:14:53 +02004692 }
4693
Gleb Natapov64a7ec02009-03-30 16:03:29 +03004694	/* set back link to prev task only if NT bit is set in eflags;
 4695	   note that old_tss_sel is not used after this point */
4696 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
4697 old_tss_sel = 0xffff;
Izik Eidus37817f22008-03-24 23:14:53 +02004698
4699 if (nseg_desc.type & 8)
Gleb Natapovb237ac32009-03-30 16:03:24 +03004700 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
4701 old_tss_base, &nseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02004702 else
Gleb Natapovb237ac32009-03-30 16:03:24 +03004703 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
4704 old_tss_base, &nseg_desc);
Izik Eidus37817f22008-03-24 23:14:53 +02004705
4706 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
Jan Kiszka91586a32009-10-05 13:07:21 +02004707 u32 eflags = kvm_get_rflags(vcpu);
4708 kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
Izik Eidus37817f22008-03-24 23:14:53 +02004709 }
4710
4711 if (reason != TASK_SWITCH_IRET) {
Izik Eidus3fe913e2008-04-28 18:23:52 +03004712 nseg_desc.type |= (1 << 1);
Izik Eidus37817f22008-03-24 23:14:53 +02004713 save_guest_segment_descriptor(vcpu, tss_selector,
4714 &nseg_desc);
4715 }
4716
4717 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
4718 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
4719 tr_seg.type = 11;
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004720 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
Izik Eidus37817f22008-03-24 23:14:53 +02004721out:
Izik Eidus37817f22008-03-24 23:14:53 +02004722 return ret;
4723}
4724EXPORT_SYMBOL_GPL(kvm_task_switch);
4725
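/*
 * KVM_SET_SREGS: install segment registers, descriptor tables and
 * control registers from userspace, reloading the MMU if the paging
 * state changed and re-queueing any interrupt recorded in the
 * interrupt bitmap.
 */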
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004726int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4727 struct kvm_sregs *sregs)
4728{
4729 int mmu_reset_needed = 0;
Gleb Natapov923c61b2009-05-11 13:35:48 +03004730 int pending_vec, max_bits;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004731 struct descriptor_table dt;
4732
4733 vcpu_load(vcpu);
4734
4735 dt.limit = sregs->idt.limit;
4736 dt.base = sregs->idt.base;
4737 kvm_x86_ops->set_idt(vcpu, &dt);
4738 dt.limit = sregs->gdt.limit;
4739 dt.base = sregs->gdt.base;
4740 kvm_x86_ops->set_gdt(vcpu, &dt);
4741
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004742 vcpu->arch.cr2 = sregs->cr2;
4743 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
Jan Kiszkadc7e7952009-07-01 20:52:03 +02004744 vcpu->arch.cr3 = sregs->cr3;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004745
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004746 kvm_set_cr8(vcpu, sregs->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004747
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004748 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004749 kvm_x86_ops->set_efer(vcpu, sregs->efer);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004750 kvm_set_apic_base(vcpu, sregs->apic_base);
4751
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004752 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004753 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
Paul Knowlesd7306162008-02-06 11:02:35 +00004754 vcpu->arch.cr0 = sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004755
Avi Kivityfc78f512009-12-07 12:16:48 +02004756 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004757 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
Marcelo Tosatti7c93be42009-10-26 16:48:33 -02004758 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004759 load_pdptrs(vcpu, vcpu->arch.cr3);
Marcelo Tosatti7c93be42009-10-26 16:48:33 -02004760 mmu_reset_needed = 1;
4761 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004762
4763 if (mmu_reset_needed)
4764 kvm_mmu_reset_context(vcpu);
4765
Gleb Natapov923c61b2009-05-11 13:35:48 +03004766 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
4767 pending_vec = find_first_bit(
4768 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
4769 if (pending_vec < max_bits) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004770 kvm_queue_interrupt(vcpu, pending_vec, false);
Gleb Natapov923c61b2009-05-11 13:35:48 +03004771 pr_debug("Set back pending irq %d\n", pending_vec);
4772 if (irqchip_in_kernel(vcpu->kvm))
4773 kvm_pic_clear_isr_ack(vcpu->kvm);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004774 }
4775
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004776 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4777 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4778 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4779 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4780 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4781 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004782
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004783 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4784 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004785
Mikhail Ershov5f0269f2009-08-03 14:58:25 +03004786 update_cr8_intercept(vcpu);
4787
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03004788 /* Older userspace won't unhalt the vcpu on reset. */
Gleb Natapovc5af89b2009-06-09 15:56:26 +03004789 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03004790 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
4791 !(vcpu->arch.cr0 & X86_CR0_PE))
4792 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4793
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004794 vcpu_put(vcpu);
4795
4796 return 0;
4797}
4798
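/*
 * KVM_SET_GUEST_DEBUG: program the hardware breakpoints, single-step
 * handling and exception injection requested by the userspace
 * debugger.
 */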
Jan Kiszkad0bfb942008-12-15 13:52:10 +01004799int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4800 struct kvm_guest_debug *dbg)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004801{
Jan Kiszka355be0b2009-10-03 00:31:21 +02004802 unsigned long rflags;
Jan Kiszkaae675ef2008-12-15 13:52:10 +01004803 int i, r;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004804
4805 vcpu_load(vcpu);
4806
Jan Kiszka4f926bf22009-10-30 12:46:59 +01004807 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
4808 r = -EBUSY;
4809 if (vcpu->arch.exception.pending)
4810 goto unlock_out;
4811 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
4812 kvm_queue_exception(vcpu, DB_VECTOR);
4813 else
4814 kvm_queue_exception(vcpu, BP_VECTOR);
4815 }
4816
Jan Kiszka91586a32009-10-05 13:07:21 +02004817 /*
4818 * Read rflags as long as potentially injected trace flags are still
4819 * filtered out.
4820 */
4821 rflags = kvm_get_rflags(vcpu);
Jan Kiszka355be0b2009-10-03 00:31:21 +02004822
4823 vcpu->guest_debug = dbg->control;
4824 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
4825 vcpu->guest_debug = 0;
4826
4827 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
Jan Kiszkaae675ef2008-12-15 13:52:10 +01004828 for (i = 0; i < KVM_NR_DB_REGS; ++i)
4829 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
4830 vcpu->arch.switch_db_regs =
4831 (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
4832 } else {
4833 for (i = 0; i < KVM_NR_DB_REGS; i++)
4834 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
4835 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
4836 }
4837
Jan Kiszka94fe45d2009-10-18 13:24:44 +02004838 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
4839 vcpu->arch.singlestep_cs =
4840 get_segment_selector(vcpu, VCPU_SREG_CS);
4841 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
4842 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004843
Jan Kiszka91586a32009-10-05 13:07:21 +02004844 /*
4845 * Trigger an rflags update that will inject or remove the trace
4846 * flags.
4847 */
4848 kvm_set_rflags(vcpu, rflags);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01004849
Jan Kiszka355be0b2009-10-03 00:31:21 +02004850 kvm_x86_ops->set_guest_debug(vcpu, dbg);
4851
Jan Kiszka4f926bf22009-10-30 12:46:59 +01004852 r = 0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004853
Jan Kiszka4f926bf22009-10-30 12:46:59 +01004854unlock_out:
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004855 vcpu_put(vcpu);
4856
4857 return r;
4858}
4859
4860/*
Hollis Blanchardd0752062007-10-31 17:24:25 -05004861 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
4862 * we have asm/x86/processor.h
4863 */
4864struct fxsave {
4865 u16 cwd;
4866 u16 swd;
4867 u16 twd;
4868 u16 fop;
4869 u64 rip;
4870 u64 rdp;
4871 u32 mxcsr;
4872 u32 mxcsr_mask;
4873 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
4874#ifdef CONFIG_X86_64
4875 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
4876#else
4877 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
4878#endif
4879};
4880
Zhang Xiantao8b006792007-11-16 13:05:55 +08004881/*
4882 * Translate a guest virtual address to a guest physical address.
4883 */
4884int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4885 struct kvm_translation *tr)
4886{
4887 unsigned long vaddr = tr->linear_address;
4888 gpa_t gpa;
4889
4890 vcpu_load(vcpu);
Izik Eidus72dc67a2008-02-10 18:04:15 +02004891 down_read(&vcpu->kvm->slots_lock);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004892 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
Izik Eidus72dc67a2008-02-10 18:04:15 +02004893 up_read(&vcpu->kvm->slots_lock);
Zhang Xiantao8b006792007-11-16 13:05:55 +08004894 tr->physical_address = gpa;
4895 tr->valid = gpa != UNMAPPED_GVA;
4896 tr->writeable = 1;
4897 tr->usermode = 0;
Zhang Xiantao8b006792007-11-16 13:05:55 +08004898 vcpu_put(vcpu);
4899
4900 return 0;
4901}
4902
Hollis Blanchardd0752062007-10-31 17:24:25 -05004903int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4904{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004905 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
Hollis Blanchardd0752062007-10-31 17:24:25 -05004906
4907 vcpu_load(vcpu);
4908
4909 memcpy(fpu->fpr, fxsave->st_space, 128);
4910 fpu->fcw = fxsave->cwd;
4911 fpu->fsw = fxsave->swd;
4912 fpu->ftwx = fxsave->twd;
4913 fpu->last_opcode = fxsave->fop;
4914 fpu->last_ip = fxsave->rip;
4915 fpu->last_dp = fxsave->rdp;
4916 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
4917
4918 vcpu_put(vcpu);
4919
4920 return 0;
4921}
4922
4923int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4924{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004925 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
Hollis Blanchardd0752062007-10-31 17:24:25 -05004926
4927 vcpu_load(vcpu);
4928
4929 memcpy(fxsave->st_space, fpu->fpr, 128);
4930 fxsave->cwd = fpu->fcw;
4931 fxsave->swd = fpu->fsw;
4932 fxsave->twd = fpu->ftwx;
4933 fxsave->fop = fpu->last_opcode;
4934 fxsave->rip = fpu->last_ip;
4935 fxsave->rdp = fpu->last_dp;
4936 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
4937
4938 vcpu_put(vcpu);
4939
4940 return 0;
4941}
4942
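/*
 * Initialize the guest FPU image: reset the FPU, save the clean state
 * as the guest image and restore the host image.
 */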
4943void fx_init(struct kvm_vcpu *vcpu)
4944{
4945 unsigned after_mxcsr_mask;
4946
Andrea Arcangelibc1a34f2008-05-01 18:43:33 +02004947	/*
 4948	 * Touch the FPU for the first time in a non-atomic context: if this
 4949	 * is the first FPU instruction, the exception handler will fire
 4950	 * before the instruction returns and will have to allocate RAM
 4951	 * with GFP_KERNEL.
 4952	 */
4953 if (!used_math())
Avi Kivityd6e88ae2008-07-10 16:53:33 +03004954 kvm_fx_save(&vcpu->arch.host_fx_image);
Andrea Arcangelibc1a34f2008-05-01 18:43:33 +02004955
Hollis Blanchardd0752062007-10-31 17:24:25 -05004956 /* Initialize guest FPU by resetting ours and saving into guest's */
4957 preempt_disable();
Avi Kivityd6e88ae2008-07-10 16:53:33 +03004958 kvm_fx_save(&vcpu->arch.host_fx_image);
4959 kvm_fx_finit();
4960 kvm_fx_save(&vcpu->arch.guest_fx_image);
4961 kvm_fx_restore(&vcpu->arch.host_fx_image);
Hollis Blanchardd0752062007-10-31 17:24:25 -05004962 preempt_enable();
4963
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004964 vcpu->arch.cr0 |= X86_CR0_ET;
Hollis Blanchardd0752062007-10-31 17:24:25 -05004965 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004966 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
4967 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
Hollis Blanchardd0752062007-10-31 17:24:25 -05004968 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
4969}
4970EXPORT_SYMBOL_GPL(fx_init);
4971
4972void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
4973{
4974 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
4975 return;
4976
4977 vcpu->guest_fpu_loaded = 1;
Avi Kivityd6e88ae2008-07-10 16:53:33 +03004978 kvm_fx_save(&vcpu->arch.host_fx_image);
4979 kvm_fx_restore(&vcpu->arch.guest_fx_image);
Hollis Blanchardd0752062007-10-31 17:24:25 -05004980}
4981EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
4982
4983void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4984{
4985 if (!vcpu->guest_fpu_loaded)
4986 return;
4987
4988 vcpu->guest_fpu_loaded = 0;
Avi Kivityd6e88ae2008-07-10 16:53:33 +03004989 kvm_fx_save(&vcpu->arch.guest_fx_image);
4990 kvm_fx_restore(&vcpu->arch.host_fx_image);
Avi Kivityf096ed82007-11-18 13:54:33 +02004991 ++vcpu->stat.fpu_reload;
Hollis Blanchardd0752062007-10-31 17:24:25 -05004992}
4993EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08004994
4995void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4996{
Joerg Roedel7f1ea202009-02-25 16:08:31 +01004997 if (vcpu->arch.time_page) {
4998 kvm_release_page_dirty(vcpu->arch.time_page);
4999 vcpu->arch.time_page = NULL;
5000 }
5001
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005002 kvm_x86_ops->vcpu_free(vcpu);
5003}
5004
5005struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5006 unsigned int id)
5007{
Avi Kivity26e52152007-11-20 15:30:24 +02005008 return kvm_x86_ops->vcpu_create(kvm, id);
5009}
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005010
Avi Kivity26e52152007-11-20 15:30:24 +02005011int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5012{
5013 int r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005014
5015 /* We do fxsave: this must be aligned. */
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005016 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005017
Sheng Yang0bed3b52008-10-09 16:01:54 +08005018 vcpu->arch.mtrr_state.have_fixed = 1;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005019 vcpu_load(vcpu);
5020 r = kvm_arch_vcpu_reset(vcpu);
5021 if (r == 0)
5022 r = kvm_mmu_setup(vcpu);
5023 vcpu_put(vcpu);
5024 if (r < 0)
5025 goto free_vcpu;
5026
Avi Kivity26e52152007-11-20 15:30:24 +02005027 return 0;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005028free_vcpu:
5029 kvm_x86_ops->vcpu_free(vcpu);
Avi Kivity26e52152007-11-20 15:30:24 +02005030 return r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005031}
5032
Hollis Blanchardd40ccc62007-11-19 14:04:43 -06005033void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005034{
5035 vcpu_load(vcpu);
5036 kvm_mmu_unload(vcpu);
5037 vcpu_put(vcpu);
5038
5039 kvm_x86_ops->vcpu_free(vcpu);
5040}
5041
5042int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5043{
Jan Kiszka448fa4a2008-09-26 09:30:48 +02005044 vcpu->arch.nmi_pending = false;
5045 vcpu->arch.nmi_injected = false;
5046
Jan Kiszka42dbaa52008-12-15 13:52:10 +01005047 vcpu->arch.switch_db_regs = 0;
5048 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5049 vcpu->arch.dr6 = DR6_FIXED_1;
5050 vcpu->arch.dr7 = DR7_FIXED_1;
5051
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005052 return kvm_x86_ops->vcpu_reset(vcpu);
5053}
5054
Alexander Graf10474ae2009-09-15 11:37:46 +02005055int kvm_arch_hardware_enable(void *garbage)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005056{
Zachary Amsden0cca7902009-09-29 11:38:35 -10005057 /*
 5058	 * Since this may be called from a hotplug notification,
5059 * we can't get the CPU frequency directly.
5060 */
5061 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5062 int cpu = raw_smp_processor_id();
5063 per_cpu(cpu_tsc_khz, cpu) = 0;
5064 }
Avi Kivity18863bd2009-09-07 11:12:18 +03005065
5066 kvm_shared_msr_cpu_online();
5067
Alexander Graf10474ae2009-09-15 11:37:46 +02005068 return kvm_x86_ops->hardware_enable(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005069}
5070
5071void kvm_arch_hardware_disable(void *garbage)
5072{
5073 kvm_x86_ops->hardware_disable(garbage);
Avi Kivity3548bab2009-11-28 14:18:47 +02005074 drop_user_return_notifiers(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005075}
5076
5077int kvm_arch_hardware_setup(void)
5078{
5079 return kvm_x86_ops->hardware_setup();
5080}
5081
5082void kvm_arch_hardware_unsetup(void)
5083{
5084 kvm_x86_ops->hardware_unsetup();
5085}
5086
5087void kvm_arch_check_processor_compat(void *rtn)
5088{
5089 kvm_x86_ops->check_processor_compatibility(rtn);
5090}
5091
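/*
 * Architecture-specific vcpu construction: MMU state, the PIO bounce
 * page, the in-kernel local APIC (if enabled) and MCE bank storage.
 */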
5092int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5093{
5094 struct page *page;
5095 struct kvm *kvm;
5096 int r;
5097
5098 BUG_ON(vcpu->kvm == NULL);
5099 kvm = vcpu->kvm;
5100
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005101 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
Gleb Natapovc5af89b2009-06-09 15:56:26 +03005102 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
Avi Kivitya4535292008-04-13 17:54:35 +03005103 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005104 else
Avi Kivitya4535292008-04-13 17:54:35 +03005105 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005106
5107 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5108 if (!page) {
5109 r = -ENOMEM;
5110 goto fail;
5111 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005112 vcpu->arch.pio_data = page_address(page);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005113
5114 r = kvm_mmu_create(vcpu);
5115 if (r < 0)
5116 goto fail_free_pio_data;
5117
5118 if (irqchip_in_kernel(kvm)) {
5119 r = kvm_create_lapic(vcpu);
5120 if (r < 0)
5121 goto fail_mmu_destroy;
5122 }
5123
Huang Ying890ca9a2009-05-11 16:48:15 +08005124 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
5125 GFP_KERNEL);
5126 if (!vcpu->arch.mce_banks) {
5127 r = -ENOMEM;
Wei Yongjun443c39b2010-01-22 14:21:29 +08005128 goto fail_free_lapic;
Huang Ying890ca9a2009-05-11 16:48:15 +08005129 }
5130 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5131
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005132 return 0;
Wei Yongjun443c39b2010-01-22 14:21:29 +08005133fail_free_lapic:
5134 kvm_free_lapic(vcpu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005135fail_mmu_destroy:
5136 kvm_mmu_destroy(vcpu);
5137fail_free_pio_data:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005138 free_page((unsigned long)vcpu->arch.pio_data);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005139fail:
5140 return r;
5141}
5142
5143void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5144{
Wei Yongjun36cb93f2010-01-22 14:18:47 +08005145 kfree(vcpu->arch.mce_banks);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005146 kvm_free_lapic(vcpu);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03005147 down_read(&vcpu->kvm->slots_lock);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005148 kvm_mmu_destroy(vcpu);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03005149 up_read(&vcpu->kvm->slots_lock);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005150 free_page((unsigned long)vcpu->arch.pio_data);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005151}
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005152
5153struct kvm *kvm_arch_create_vm(void)
5154{
5155 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
5156
5157 if (!kvm)
5158 return ERR_PTR(-ENOMEM);
5159
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08005160 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +03005161 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005162
Sheng Yang5550af42008-10-15 20:15:06 +08005163 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5164 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5165
Marcelo Tosatti53f658b2008-12-11 20:45:05 +01005166 rdtscll(kvm->arch.vm_init_tsc);
5167
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005168 return kvm;
5169}
5170
5171static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
5172{
5173 vcpu_load(vcpu);
5174 kvm_mmu_unload(vcpu);
5175 vcpu_put(vcpu);
5176}
5177
5178static void kvm_free_vcpus(struct kvm *kvm)
5179{
5180 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005181 struct kvm_vcpu *vcpu;
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005182
5183 /*
5184 * Unpin any mmu pages first.
5185 */
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005186 kvm_for_each_vcpu(i, vcpu, kvm)
5187 kvm_unload_vcpu_mmu(vcpu);
5188 kvm_for_each_vcpu(i, vcpu, kvm)
5189 kvm_arch_vcpu_free(vcpu);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005190
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005191 mutex_lock(&kvm->lock);
5192 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
5193 kvm->vcpus[i] = NULL;
5194
5195 atomic_set(&kvm->online_vcpus, 0);
5196 mutex_unlock(&kvm->lock);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005197}
5198
Sheng Yangad8ba2c2009-01-06 10:03:02 +08005199void kvm_arch_sync_events(struct kvm *kvm)
5200{
Sheng Yangba4cef32009-01-06 10:03:03 +08005201 kvm_free_all_assigned_devices(kvm);
Sheng Yangad8ba2c2009-01-06 10:03:02 +08005202}
5203
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005204void kvm_arch_destroy_vm(struct kvm *kvm)
5205{
Sheng Yang6eb55812008-10-31 12:37:41 +08005206 kvm_iommu_unmap_guest(kvm);
Sheng Yang78376992008-01-28 05:10:22 +08005207 kvm_free_pit(kvm);
Zhang Xiantaod7deeeb2007-12-14 10:17:34 +08005208 kfree(kvm->arch.vpic);
5209 kfree(kvm->arch.vioapic);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005210 kvm_free_vcpus(kvm);
5211 kvm_free_physmem(kvm);
Avi Kivity3d458302008-03-25 11:26:13 +02005212 if (kvm->arch.apic_access_page)
5213 put_page(kvm->arch.apic_access_page);
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005214 if (kvm->arch.ept_identity_pagetable)
5215 put_page(kvm->arch.ept_identity_pagetable);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005216 kfree(kvm);
5217}
Zhang Xiantao0de10342007-11-20 16:25:04 +08005218
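/*
 * Called when a memory slot changes: map or unmap anonymous memory for
 * !user_alloc slots and recompute the shadow MMU page allotment.
 */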
5219int kvm_arch_set_memory_region(struct kvm *kvm,
5220 struct kvm_userspace_memory_region *mem,
5221 struct kvm_memory_slot old,
5222 int user_alloc)
5223{
5224 int npages = mem->memory_size >> PAGE_SHIFT;
5225 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
5226
 5227	/* To keep backward compatibility with older userspace,
 5228	 * x86 needs to handle the !user_alloc case.
 5229	 */
5230 if (!user_alloc) {
5231 if (npages && !old.rmap) {
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005232 unsigned long userspace_addr;
5233
Izik Eidus72dc67a2008-02-10 18:04:15 +02005234 down_write(&current->mm->mmap_sem);
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005235 userspace_addr = do_mmap(NULL, 0,
5236 npages * PAGE_SIZE,
5237 PROT_READ | PROT_WRITE,
Avi Kivityacee3c02008-08-26 17:22:47 +03005238 MAP_PRIVATE | MAP_ANONYMOUS,
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005239 0);
Izik Eidus72dc67a2008-02-10 18:04:15 +02005240 up_write(&current->mm->mmap_sem);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005241
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005242 if (IS_ERR((void *)userspace_addr))
5243 return PTR_ERR((void *)userspace_addr);
5244
5245 /* set userspace_addr atomically for kvm_hva_to_rmapp */
5246 spin_lock(&kvm->mmu_lock);
5247 memslot->userspace_addr = userspace_addr;
5248 spin_unlock(&kvm->mmu_lock);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005249 } else {
5250 if (!old.user_alloc && old.rmap) {
5251 int ret;
5252
Izik Eidus72dc67a2008-02-10 18:04:15 +02005253 down_write(&current->mm->mmap_sem);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005254 ret = do_munmap(current->mm, old.userspace_addr,
5255 old.npages * PAGE_SIZE);
Izik Eidus72dc67a2008-02-10 18:04:15 +02005256 up_write(&current->mm->mmap_sem);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005257 if (ret < 0)
5258 printk(KERN_WARNING
5259 "kvm_vm_ioctl_set_memory_region: "
5260 "failed to munmap memory\n");
5261 }
5262 }
5263 }
5264
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03005265 spin_lock(&kvm->mmu_lock);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08005266 if (!kvm->arch.n_requested_mmu_pages) {
Zhang Xiantao0de10342007-11-20 16:25:04 +08005267 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
5268 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
5269 }
5270
5271 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03005272 spin_unlock(&kvm->mmu_lock);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005273
5274 return 0;
5275}
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005276
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03005277void kvm_arch_flush_shadow(struct kvm *kvm)
5278{
5279 kvm_mmu_zap_all(kvm);
Marcelo Tosatti8986ecc2009-05-12 18:55:45 -03005280 kvm_reload_remote_mmus(kvm);
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03005281}
5282
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005283int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
5284{
Avi Kivitya4535292008-04-13 17:54:35 +03005285 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
Gleb Natapova1b37102009-07-09 15:33:52 +03005286 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
5287 || vcpu->arch.nmi_pending ||
5288 (kvm_arch_interrupt_allowed(vcpu) &&
5289 kvm_cpu_has_interrupt(vcpu));
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005290}
Zhang Xiantao57361992007-12-17 14:21:40 +08005291
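/*
 * Kick a vcpu out of guest mode, or wake it from halt, so it notices
 * newly pending work.
 */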
Zhang Xiantao57361992007-12-17 14:21:40 +08005292void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
5293{
Marcelo Tosatti32f88402009-05-07 17:55:12 -03005294 int me;
5295 int cpu = vcpu->cpu;
Zhang Xiantao57361992007-12-17 14:21:40 +08005296
5297 if (waitqueue_active(&vcpu->wq)) {
5298 wake_up_interruptible(&vcpu->wq);
5299 ++vcpu->stat.halt_wakeup;
5300 }
Marcelo Tosatti32f88402009-05-07 17:55:12 -03005301
5302 me = get_cpu();
5303 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
5304 if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
5305 smp_send_reschedule(cpu);
Marcelo Tosattie9571ed2008-04-11 15:01:22 -03005306 put_cpu();
Zhang Xiantao57361992007-12-17 14:21:40 +08005307}
Gleb Natapov78646122009-03-23 12:12:11 +02005308
5309int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
5310{
5311 return kvm_x86_ops->interrupt_allowed(vcpu);
5312}
Marcelo Tosatti229456f2009-06-17 09:22:14 -03005313
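/*
 * rflags accessors that hide the TF/RF bits injected for guest
 * single-stepping from the rest of KVM and from userspace.
 */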
Jan Kiszka94fe45d2009-10-18 13:24:44 +02005314unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
5315{
5316 unsigned long rflags;
5317
5318 rflags = kvm_x86_ops->get_rflags(vcpu);
5319 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5320 rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
5321 return rflags;
5322}
5323EXPORT_SYMBOL_GPL(kvm_get_rflags);
5324
5325void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
5326{
5327 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
5328 vcpu->arch.singlestep_cs ==
5329 get_segment_selector(vcpu, VCPU_SREG_CS) &&
5330 vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
5331 rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
5332 kvm_x86_ops->set_rflags(vcpu, rflags);
5333}
5334EXPORT_SYMBOL_GPL(kvm_set_rflags);
5335
Marcelo Tosatti229456f2009-06-17 09:22:14 -03005336EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
5337EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
5338EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
5339EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
5340EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
Joerg Roedel0ac406d2009-10-09 16:08:27 +02005341EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02005342EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
Joerg Roedel17897f32009-10-09 16:08:29 +02005343EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
Joerg Roedel236649d2009-10-09 16:08:30 +02005344EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
Joerg Roedelec1ff792009-10-09 16:08:31 +02005345EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
Joerg Roedel532a46b2009-10-09 16:08:32 +02005346EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);