/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, 0);

static int enable_vpid = 1;
module_param(enable_vpid, bool, 0);

static int flexpriority_enabled = 1;
module_param(flexpriority_enabled, bool, 0);

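/*
 * The three knobs above are module parameters, so they can be overridden
 * when the module is loaded, e.g. with something like (hypothetical
 * invocation, module name assumed to be kvm-intel):
 *   modprobe kvm-intel enable_vpid=0
 */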
struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	int                   launched;
	u8                    fail;
	u32                   idt_vectoring_info;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	int                   msr_offset_efer;
#ifdef CONFIG_X86_64
	int                   msr_offset_kernel_gs_base;
#endif
	struct vmcs          *vmcs;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
		int           guest_efer_loaded;
	} host_state;
	struct {
		struct {
			bool pending;
			u8 vector;
			unsigned rip;
		} irq;
	} rmode;
	int vpid;
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}

static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline int cpu_has_vmx_tpr_shadow(void)
{
	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
}

static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
}

static inline int cpu_has_secondary_exec_ctrls(void)
{
	return (vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return flexpriority_enabled
		&& (vmcs_config.cpu_based_2nd_exec_ctrl &
		    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}

static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
		(irqchip_in_kernel(kvm)));
}

static inline int cpu_has_vmx_vpid(void)
{
	return (vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID);
}

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}

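/*
 * Issue the INVVPID instruction to invalidate TLB entries tagged with the
 * given VPID.  The operand descriptor (16-bit vpid, reserved bits, gva)
 * matches the layout expected by the hardware; 'ext' selects the
 * invalidation type (e.g. single-context).
 */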
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (ASM_VMX_INVVPID
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

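/*
 * VMCLEAR the given VMCS so it is no longer the current, active VMCS on
 * this cpu; any cached VMCS state is written back to memory.
 */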
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.arch.host_tsc);
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu == -1)
		return;
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
	vmx->launched = 0;
}

static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

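/*
 * Thin wrappers around VMREAD/VMWRITE.  All accesses go through
 * vmcs_readl()/vmcs_writel(); the sized variants only cast the value, and
 * on 32-bit hosts 64-bit fields are accessed as two adjacent halves.
 */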
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	vmcs_writel(field, value);
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

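/*
 * Recompute which exceptions must cause a VM exit: always intercept #PF and
 * #UD, plus #NM while the guest FPU is lazily switched out, #DB when guest
 * debugging is enabled, and every vector while emulating real mode.
 */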
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
		eb |= 1u << 1;
	if (vcpu->arch.rmode.active)
		eb = ~0;
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct desc_struct *descs;

	get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}

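/*
 * Load the guest's EFER into the MSR only when it differs from the host's
 * in bits the hardware does not already handle for us; otherwise the
 * expensive MSR switch is skipped entirely.
 */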
static void load_transition_efer(struct vcpu_vmx *vmx)
{
	int efer_offset = vmx->msr_offset_efer;
	u64 host_efer = vmx->host_msrs[efer_offset].data;
	u64 guest_efer = vmx->guest_msrs[efer_offset].data;
	u64 ignore_bits;

	if (efer_offset < 0)
		return;
	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
		return;

	vmx->host_state.guest_efer_loaded = 1;
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	wrmsrl(MSR_EFER, guest_efer);
	vmx->vcpu.stat.efer_reload++;
}

static void reload_host_efer(struct vcpu_vmx *vmx)
{
	if (vmx->host_state.guest_efer_loaded) {
		vmx->host_state.guest_efer_loaded = 0;
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
	}
}

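/*
 * Save the host segment, LDT and MSR state that the hardware does not
 * preserve across VM entry, and load the guest MSRs, so that
 * vmx_load_host_state() can restore everything afterwards.
 */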
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	vmx->host_state.gs_sel = read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);

#endif
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_transition_efer(vmx);
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);
	}
	reload_tss();
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	reload_host_efer(vmx);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta, new_offset;

	if (vcpu->cpu != cpu) {
		vcpu_clear(vmx);
		kvm_migrate_apic_timer(vcpu);
		vpid_sync_vcpu_all(vmx);
	}

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonic.
		 */
		rdtscll(tsc_this);
		if (tsc_this < vcpu->arch.host_tsc) {
			delta = vcpu->arch.host_tsc - tsc_this;
			new_offset = vmcs_read64(TSC_OFFSET) + delta;
			vmcs_write64(TSC_OFFSET, new_offset);
		}
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_load_host_state(to_vmx(vcpu));
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->arch.cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
	vcpu_clear(to_vmx(vcpu));
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->arch.rmode.active)
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	vmcs_writel(GUEST_RFLAGS, rflags);
}

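/*
 * Advance the guest RIP past the instruction that was just emulated, using
 * the instruction length recorded by the hardware, and drop any transient
 * interrupt-blocking state (STI / MOV SS shadow).
 */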
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;
	u32 interruptibility;

	rip = vmcs_readl(GUEST_RIP);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
	vcpu->arch.interrupt_window_open = 1;
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     nr | INTR_TYPE_EXCEPTION
		     | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
		     | INTR_INFO_VALID_MASK);
	if (has_error_code)
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
}

static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
#ifdef CONFIG_X86_64
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;
}
#endif

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs;

	vmx_load_host_state(vmx);
	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		int index;

		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_K6_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	vmx->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
#endif
	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
	u64 host_tsc;

	rdtscll(host_tsc);
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct kvm_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		if (vmx->host_state.loaded) {
			reload_host_efer(vmx);
			load_transition_efer(vmx);
		}
		break;
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		guest_write_tsc(data);
		break;
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			msr->data = data;
			if (vmx->host_state.loaded)
				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->arch.regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	unsigned long dr7 = 0x400;
	int old_singlestep;

	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		int i;

		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint */
		}

		vcpu->guest_debug.singlestep = dbg->singlestep;
	} else
		vcpu->guest_debug.singlestep = 0;

	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);
	}

	update_exception_bitmap(vcpu);
	vmcs_writel(GUEST_DR7, dr7);

	return 0;
}

static int vmx_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 idtv_info_field;

	idtv_info_field = vmx->idt_vectoring_info;
	if (idtv_info_field & INTR_INFO_VALID_MASK) {
		if (is_external_interrupt(idtv_info_field))
			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
		else
			printk(KERN_DEBUG "pending exception: not handled yet\n");
	}
	return -1;
}

static __init int cpu_has_kvm_support(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}

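/*
 * Per-cpu VMX setup: make sure the feature-control MSR allows VMXON
 * (enabling and locking it if the BIOS left it unlocked), set CR4.VMXE and
 * execute VMXON on this cpu's vmxarea.
 */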
static void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

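/*
 * Compute a usable value for one VMX control field: 'ctl_min' bits are
 * required, 'ctl_opt' bits are merely desired.  The capability MSR's high
 * word clears bits that must be zero and its low word sets bits that must
 * be one; fail if a required bit cannot be set.
 */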
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING;
	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min = 0;
		opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID;
		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
	      SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	return 0;
}

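/*
 * Allocate a VMCS region on the given cpu's NUMA node, sized and stamped
 * with the revision id that setup_vmcs_config() read from
 * MSR_IA32_VMX_BASIC.
 */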
static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}

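/*
 * The helpers below emulate real mode on top of vm86 mode: enter_rmode()
 * saves the protected-mode segment state and loads vm86-compatible values,
 * while enter_pmode()/fix_pmode_dataseg() restore it when the guest turns
 * protection back on.
 */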
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->arch.rmode.active = 0;

	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

static gva_t rmode_tss_base(struct kvm *kvm)
{
	if (!kvm->arch.tss_addr) {
		gfn_t base_gfn = kvm->memslots[0].base_gfn +
				 kvm->memslots[0].npages - 3;
		return base_gfn << PAGE_SHIFT;
	}
	return kvm->arch.tss_addr;
}

static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, save->base >> 4);
	vmcs_write32(sf->base, save->base & 0xfffff);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->arch.rmode.active = 1;

	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vcpu->arch.rmode.save_iopl
		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);

	kvm_mmu_reset_context(vcpu);
	init_rmode_tss(vcpu->kvm);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
		       __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}

	vcpu->arch.shadow_efer |= EFER_LMA;

	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     | VM_ENTRY_IA32E_MODE);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);
}

#endif

static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	vpid_sync_vcpu_all(to_vmx(vcpu));
}

Anthony Liguori25c4c272007-04-27 09:29:21 +03001294static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08001295{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001296 vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
1297 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
Avi Kivity399badf2007-01-05 16:36:38 -08001298}
1299
Avi Kivity6aa8b732006-12-10 02:21:36 -08001300static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1301{
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001302 vmx_fpu_deactivate(vcpu);
1303
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001304 if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001305 enter_pmode(vcpu);
1306
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001307 if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001308 enter_rmode(vcpu);
1309
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001310#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001311 if (vcpu->arch.shadow_efer & EFER_LME) {
Rusty Russell707d92f2007-07-17 23:19:08 +10001312 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001313 enter_lmode(vcpu);
Rusty Russell707d92f2007-07-17 23:19:08 +10001314 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001315 exit_lmode(vcpu);
1316 }
1317#endif
1318
1319 vmcs_writel(CR0_READ_SHADOW, cr0);
1320 vmcs_writel(GUEST_CR0,
1321 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001322 vcpu->arch.cr0 = cr0;
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001323
Rusty Russell707d92f2007-07-17 23:19:08 +10001324 if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001325 vmx_fpu_activate(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001326}
1327
Avi Kivity6aa8b732006-12-10 02:21:36 -08001328static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1329{
Sheng Yang2384d2b2008-01-17 15:14:33 +08001330 vmx_flush_tlb(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001331 vmcs_writel(GUEST_CR3, cr3);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001332 if (vcpu->arch.cr0 & X86_CR0_PE)
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001333 vmx_fpu_deactivate(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001334}
1335
1336static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1337{
1338 vmcs_writel(CR4_READ_SHADOW, cr4);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001339 vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
Avi Kivity6aa8b732006-12-10 02:21:36 -08001340 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001341 vcpu->arch.cr4 = cr4;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001342}
1343
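/*
 * EFER is not context-switched by hardware on this path; the guest
 * value lives in the software MSR save/load area maintained by
 * setup_msrs(), and only the IA32e-mode bit is reflected in the
 * VM-entry controls.
 */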
Avi Kivity6aa8b732006-12-10 02:21:36 -08001344static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1345{
Rusty Russell8b9cf982007-07-30 16:31:43 +10001346 struct vcpu_vmx *vmx = to_vmx(vcpu);
1347 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001348
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001349 vcpu->arch.shadow_efer = efer;
Joerg Roedel9f62e192008-01-31 14:57:39 +01001350 if (!msr)
1351 return;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001352 if (efer & EFER_LMA) {
1353 vmcs_write32(VM_ENTRY_CONTROLS,
1354 vmcs_read32(VM_ENTRY_CONTROLS) |
Li, Xin B1e4e6e02007-08-01 21:49:10 +03001355 VM_ENTRY_IA32E_MODE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001356 msr->data = efer;
1357
1358 } else {
1359 vmcs_write32(VM_ENTRY_CONTROLS,
1360 vmcs_read32(VM_ENTRY_CONTROLS) &
Li, Xin B1e4e6e02007-08-01 21:49:10 +03001361 ~VM_ENTRY_IA32E_MODE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001362
1363 msr->data = efer & ~EFER_LME;
1364 }
Rusty Russell8b9cf982007-07-30 16:31:43 +10001365 setup_msrs(vmx);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001366}
1367
Avi Kivity6aa8b732006-12-10 02:21:36 -08001368static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1369{
1370 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1371
1372 return vmcs_readl(sf->base);
1373}
1374
1375static void vmx_get_segment(struct kvm_vcpu *vcpu,
1376 struct kvm_segment *var, int seg)
1377{
1378 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1379 u32 ar;
1380
1381 var->base = vmcs_readl(sf->base);
1382 var->limit = vmcs_read32(sf->limit);
1383 var->selector = vmcs_read16(sf->selector);
1384 ar = vmcs_read32(sf->ar_bytes);
1385 if (ar & AR_UNUSABLE_MASK)
1386 ar = 0;
1387 var->type = ar & 15;
1388 var->s = (ar >> 4) & 1;
1389 var->dpl = (ar >> 5) & 3;
1390 var->present = (ar >> 7) & 1;
1391 var->avl = (ar >> 12) & 1;
1392 var->l = (ar >> 13) & 1;
1393 var->db = (ar >> 14) & 1;
1394 var->g = (ar >> 15) & 1;
1395 var->unusable = (ar >> 16) & 1;
1396}
1397
Izik Eidus2e4d2652008-03-24 19:38:34 +02001398static int vmx_get_cpl(struct kvm_vcpu *vcpu)
1399{
1400 struct kvm_segment kvm_seg;
1401
1402 if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
1403 return 0;
1404
1405 if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
1406 return 3;
1407
1408 vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
1409 return kvm_seg.selector & 3;
1410}
1411
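/*
 * Pack a struct kvm_segment into the VMCS access-rights layout:
 * bits 3:0 type, 4 S, 6:5 DPL, 7 P, 12 AVL, 13 L, 14 D/B, 15 G,
 * bit 16 "segment unusable".
 */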
Avi Kivity653e3102007-05-07 10:55:37 +03001412static u32 vmx_segment_access_rights(struct kvm_segment *var)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001413{
Avi Kivity6aa8b732006-12-10 02:21:36 -08001414 u32 ar;
1415
Avi Kivity653e3102007-05-07 10:55:37 +03001416 if (var->unusable)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001417 ar = 1 << 16;
1418 else {
1419 ar = var->type & 15;
1420 ar |= (var->s & 1) << 4;
1421 ar |= (var->dpl & 3) << 5;
1422 ar |= (var->present & 1) << 7;
1423 ar |= (var->avl & 1) << 12;
1424 ar |= (var->l & 1) << 13;
1425 ar |= (var->db & 1) << 14;
1426 ar |= (var->g & 1) << 15;
1427 }
Uri Lublinf7fbf1f2006-12-13 00:34:00 -08001428 if (ar == 0) /* a 0 value means unusable */
1429 ar = AR_UNUSABLE_MASK;
Avi Kivity653e3102007-05-07 10:55:37 +03001430
1431 return ar;
1432}
1433
1434static void vmx_set_segment(struct kvm_vcpu *vcpu,
1435 struct kvm_segment *var, int seg)
1436{
1437 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1438 u32 ar;
1439
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001440 if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
1441 vcpu->arch.rmode.tr.selector = var->selector;
1442 vcpu->arch.rmode.tr.base = var->base;
1443 vcpu->arch.rmode.tr.limit = var->limit;
1444 vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
Avi Kivity653e3102007-05-07 10:55:37 +03001445 return;
1446 }
1447 vmcs_writel(sf->base, var->base);
1448 vmcs_write32(sf->limit, var->limit);
1449 vmcs_write16(sf->selector, var->selector);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001450 if (vcpu->arch.rmode.active && var->s) {
Avi Kivity653e3102007-05-07 10:55:37 +03001451 /*
1452 * Hack real-mode segments into vm86 compatibility.
1453 */
1454 if (var->base == 0xffff0000 && var->selector == 0xf000)
1455 vmcs_writel(sf->base, 0xf0000);
1456 ar = 0xf3;
1457 } else
1458 ar = vmx_segment_access_rights(var);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001459 vmcs_write32(sf->ar_bytes, ar);
1460}
1461
Avi Kivity6aa8b732006-12-10 02:21:36 -08001462static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1463{
1464 u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1465
1466 *db = (ar >> 14) & 1;
1467 *l = (ar >> 13) & 1;
1468}
1469
1470static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1471{
1472 dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1473 dt->base = vmcs_readl(GUEST_IDTR_BASE);
1474}
1475
1476static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1477{
1478 vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1479 vmcs_writel(GUEST_IDTR_BASE, dt->base);
1480}
1481
1482static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1483{
1484 dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1485 dt->base = vmcs_readl(GUEST_GDTR_BASE);
1486}
1487
1488static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1489{
1490 vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1491 vmcs_writel(GUEST_GDTR_BASE, dt->base);
1492}
1493
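/*
 * Build the dummy TSS used for real-mode emulation: clear the three
 * pages at the rmode TSS base, point the I/O-map base field (offset
 * 0x66) just past the TSS, and terminate the I/O bitmap with an 0xff
 * byte.  Returns 1 on success, 0 on failure.
 */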
Mike Dayd77c26f2007-10-08 09:02:08 -04001494static int init_rmode_tss(struct kvm *kvm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001495{
Avi Kivity6aa8b732006-12-10 02:21:36 -08001496 gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
Izik Eidus195aefd2007-10-01 22:14:18 +02001497 u16 data = 0;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001498 int ret = 0;
Izik Eidus195aefd2007-10-01 22:14:18 +02001499 int r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001500
Marcelo Tosatti707a18a2008-03-18 17:42:34 -03001501 down_read(&kvm->slots_lock);
Izik Eidus195aefd2007-10-01 22:14:18 +02001502 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
1503 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001504 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02001505 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
1506 r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
1507 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001508 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02001509 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
1510 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001511 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02001512 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
1513 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001514 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02001515 data = ~0;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001516 r = kvm_write_guest_page(kvm, fn, &data,
1517 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
1518 sizeof(u8));
Izik Eidus195aefd2007-10-01 22:14:18 +02001519 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001520 goto out;
1521
1522 ret = 1;
1523out:
Marcelo Tosatti707a18a2008-03-18 17:42:34 -03001524 up_read(&kvm->slots_lock);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001525 return ret;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001526}
1527
Avi Kivity6aa8b732006-12-10 02:21:36 -08001528static void seg_setup(int seg)
1529{
1530 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1531
1532 vmcs_write16(sf->selector, 0);
1533 vmcs_writel(sf->base, 0);
1534 vmcs_write32(sf->limit, 0xffff);
1535 vmcs_write32(sf->ar_bytes, 0x93);
1536}
1537
Sheng Yangf78e0e22007-10-29 09:40:42 +08001538static int alloc_apic_access_page(struct kvm *kvm)
1539{
1540 struct kvm_userspace_memory_region kvm_userspace_mem;
1541 int r = 0;
1542
Izik Eidus72dc67a2008-02-10 18:04:15 +02001543 down_write(&kvm->slots_lock);
Zhang Xiantaobfc6d222007-12-14 10:20:16 +08001544 if (kvm->arch.apic_access_page)
Sheng Yangf78e0e22007-10-29 09:40:42 +08001545 goto out;
1546 kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
1547 kvm_userspace_mem.flags = 0;
1548 kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
1549 kvm_userspace_mem.memory_size = PAGE_SIZE;
1550 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
1551 if (r)
1552 goto out;
Izik Eidus72dc67a2008-02-10 18:04:15 +02001553
1554 down_read(&current->mm->mmap_sem);
Zhang Xiantaobfc6d222007-12-14 10:20:16 +08001555 kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
Izik Eidus72dc67a2008-02-10 18:04:15 +02001556 up_read(&current->mm->mmap_sem);
Sheng Yangf78e0e22007-10-29 09:40:42 +08001557out:
Izik Eidus72dc67a2008-02-10 18:04:15 +02001558 up_write(&kvm->slots_lock);
Sheng Yangf78e0e22007-10-29 09:40:42 +08001559 return r;
1560}
1561
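/*
 * VPIDs come from a single global bitmap shared by all guests; VPID 0
 * tags the host and is never handed out, so a vcpu that cannot get a
 * VPID runs with vpid == 0 and relies on implicit TLB flushes.
 */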
Sheng Yang2384d2b2008-01-17 15:14:33 +08001562static void allocate_vpid(struct vcpu_vmx *vmx)
1563{
1564 int vpid;
1565
1566 vmx->vpid = 0;
1567 if (!enable_vpid || !cpu_has_vmx_vpid())
1568 return;
1569 spin_lock(&vmx_vpid_lock);
1570 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
1571 if (vpid < VMX_NR_VPIDS) {
1572 vmx->vpid = vpid;
1573 __set_bit(vpid, vmx_vpid_bitmap);
1574 }
1575 spin_unlock(&vmx_vpid_lock);
1576}
1577
Avi Kivity6aa8b732006-12-10 02:21:36 -08001578/*
1579 * Sets up the vmcs for emulated real mode.
1580 */
Rusty Russell8b9cf982007-07-30 16:31:43 +10001581static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001582{
1583 u32 host_sysenter_cs;
1584 u32 junk;
1585 unsigned long a;
1586 struct descriptor_table dt;
1587 int i;
Avi Kivitycd2276a2007-05-14 20:41:13 +03001588 unsigned long kvm_vmx_return;
Yang, Sheng6e5d8652007-09-12 18:03:11 +08001589 u32 exec_control;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001590
Avi Kivity6aa8b732006-12-10 02:21:36 -08001591 /* I/O */
He, Qingfdef3ad2007-04-30 09:45:24 +03001592 vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
1593 vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001594
Avi Kivity6aa8b732006-12-10 02:21:36 -08001595 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
1596
Avi Kivity6aa8b732006-12-10 02:21:36 -08001597 /* Control */
Yang, Sheng1c3d14f2007-07-29 11:07:42 +03001598 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
1599 vmcs_config.pin_based_exec_ctrl);
Yang, Sheng6e5d8652007-09-12 18:03:11 +08001600
1601 exec_control = vmcs_config.cpu_based_exec_ctrl;
1602 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
1603 exec_control &= ~CPU_BASED_TPR_SHADOW;
1604#ifdef CONFIG_X86_64
1605 exec_control |= CPU_BASED_CR8_STORE_EXITING |
1606 CPU_BASED_CR8_LOAD_EXITING;
1607#endif
1608 }
1609 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001610
Sheng Yang83ff3b92007-11-21 14:33:25 +08001611 if (cpu_has_secondary_exec_ctrls()) {
1612 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
1613 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
1614 exec_control &=
1615 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
Sheng Yang2384d2b2008-01-17 15:14:33 +08001616 if (vmx->vpid == 0)
1617 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
Sheng Yang83ff3b92007-11-21 14:33:25 +08001618 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
1619 }
Sheng Yangf78e0e22007-10-29 09:40:42 +08001620
Avi Kivityc7addb92007-09-16 18:58:32 +02001621 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
1622 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001623 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
1624
1625 vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
1626 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
1627 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
1628
1629 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
1630 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1631 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
1632 vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
1633 vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
1634 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001635#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001636 rdmsrl(MSR_FS_BASE, a);
1637 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
1638 rdmsrl(MSR_GS_BASE, a);
1639 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
1640#else
1641 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
1642 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
1643#endif
1644
1645 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
1646
1647 get_idt(&dt);
1648 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
1649
Mike Dayd77c26f2007-10-08 09:02:08 -04001650 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
Avi Kivitycd2276a2007-05-14 20:41:13 +03001651 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
Eddie Dong2cc51562007-05-21 07:28:09 +03001652 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1653 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
1654 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001655
1656 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
1657 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
1658 rdmsrl(MSR_IA32_SYSENTER_ESP, a);
1659 vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
1660 rdmsrl(MSR_IA32_SYSENTER_EIP, a);
1661 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
1662
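	/*
	 * Snapshot the host values of the MSRs that are switched around
	 * guest entry/exit so they can be restored afterwards; MSRs that
	 * fault on this CPU are skipped.
	 */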
Avi Kivity6aa8b732006-12-10 02:21:36 -08001663 for (i = 0; i < NR_VMX_MSR; ++i) {
1664 u32 index = vmx_msr_index[i];
1665 u32 data_low, data_high;
1666 u64 data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001667 int j = vmx->nmsrs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001668
1669 if (rdmsr_safe(index, &data_low, &data_high) < 0)
1670 continue;
Avi Kivity432bd6c2007-01-31 23:48:13 -08001671 if (wrmsr_safe(index, data_low, data_high) < 0)
1672 continue;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001673 data = data_low | ((u64)data_high << 32);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001674 vmx->host_msrs[j].index = index;
1675 vmx->host_msrs[j].reserved = 0;
1676 vmx->host_msrs[j].data = data;
1677 vmx->guest_msrs[j] = vmx->host_msrs[j];
1678 ++vmx->nmsrs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001679 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001680
Yang, Sheng1c3d14f2007-07-29 11:07:42 +03001681 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001682
1683 /* 22.2.1, 20.8.1 */
Yang, Sheng1c3d14f2007-07-29 11:07:42 +03001684 vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
1685
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001686 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
1687 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
1688
Sheng Yangf78e0e22007-10-29 09:40:42 +08001689
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001690 return 0;
1691}
1692
1693static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1694{
1695 struct vcpu_vmx *vmx = to_vmx(vcpu);
1696 u64 msr;
1697 int ret;
1698
1699 if (!init_rmode_tss(vmx->vcpu.kvm)) {
1700 ret = -ENOMEM;
1701 goto out;
1702 }
1703
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001704 vmx->vcpu.arch.rmode.active = 0;
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001705
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001706 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02001707 kvm_set_cr8(&vmx->vcpu, 0);
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001708 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1709 if (vmx->vcpu.vcpu_id == 0)
1710 msr |= MSR_IA32_APICBASE_BSP;
1711 kvm_set_apic_base(&vmx->vcpu, msr);
1712
1713 fx_init(&vmx->vcpu);
1714
1715 /*
1716 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
1717 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
1718 */
1719 if (vmx->vcpu.vcpu_id == 0) {
1720 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
1721 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
1722 } else {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001723 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
1724 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001725 }
1726 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1727 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1728
1729 seg_setup(VCPU_SREG_DS);
1730 seg_setup(VCPU_SREG_ES);
1731 seg_setup(VCPU_SREG_FS);
1732 seg_setup(VCPU_SREG_GS);
1733 seg_setup(VCPU_SREG_SS);
1734
1735 vmcs_write16(GUEST_TR_SELECTOR, 0);
1736 vmcs_writel(GUEST_TR_BASE, 0);
1737 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
1738 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1739
1740 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
1741 vmcs_writel(GUEST_LDTR_BASE, 0);
1742 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
1743 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
1744
1745 vmcs_write32(GUEST_SYSENTER_CS, 0);
1746 vmcs_writel(GUEST_SYSENTER_ESP, 0);
1747 vmcs_writel(GUEST_SYSENTER_EIP, 0);
1748
1749 vmcs_writel(GUEST_RFLAGS, 0x02);
1750 if (vmx->vcpu.vcpu_id == 0)
1751 vmcs_writel(GUEST_RIP, 0xfff0);
1752 else
1753 vmcs_writel(GUEST_RIP, 0);
1754 vmcs_writel(GUEST_RSP, 0);
1755
1756 /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
1757 vmcs_writel(GUEST_DR7, 0x400);
1758
1759 vmcs_writel(GUEST_GDTR_BASE, 0);
1760 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
1761
1762 vmcs_writel(GUEST_IDTR_BASE, 0);
1763 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
1764
1765 vmcs_write32(GUEST_ACTIVITY_STATE, 0);
1766 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
1767 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
1768
1769 guest_write_tsc(0);
1770
1771 /* Special registers */
1772 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
1773
1774 setup_msrs(vmx);
1775
Avi Kivity6aa8b732006-12-10 02:21:36 -08001776 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
1777
Sheng Yangf78e0e22007-10-29 09:40:42 +08001778 if (cpu_has_vmx_tpr_shadow()) {
1779 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
1780 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
1781 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001782 page_to_phys(vmx->vcpu.arch.apic->regs_page));
Sheng Yangf78e0e22007-10-29 09:40:42 +08001783 vmcs_write32(TPR_THRESHOLD, 0);
1784 }
1785
1786 if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
1787 vmcs_write64(APIC_ACCESS_ADDR,
Zhang Xiantaobfc6d222007-12-14 10:20:16 +08001788 page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001789
Sheng Yang2384d2b2008-01-17 15:14:33 +08001790 if (vmx->vpid != 0)
1791 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
1792
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001793 vmx->vcpu.arch.cr0 = 0x60000010;
1794 vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
Rusty Russell8b9cf982007-07-30 16:31:43 +10001795 vmx_set_cr4(&vmx->vcpu, 0);
Rusty Russell8b9cf982007-07-30 16:31:43 +10001796 vmx_set_efer(&vmx->vcpu, 0);
Rusty Russell8b9cf982007-07-30 16:31:43 +10001797 vmx_fpu_activate(&vmx->vcpu);
1798 update_exception_bitmap(&vmx->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001799
Sheng Yang2384d2b2008-01-17 15:14:33 +08001800 vpid_sync_vcpu_all(vmx);
1801
Avi Kivity6aa8b732006-12-10 02:21:36 -08001802 return 0;
1803
Avi Kivity6aa8b732006-12-10 02:21:36 -08001804out:
1805 return ret;
1806}
1807
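/*
 * In vm86 real mode VT-x refuses to inject hardware interrupts, so the
 * interrupt is injected as a software interrupt with instruction length
 * 1 and GUEST_RIP backed up by one byte; fixup_rmode_irq() reconstructs
 * the vectoring information after the vmexit if delivery did not
 * complete.
 */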
Eddie Dong85f455f2007-07-06 12:20:49 +03001808static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
1809{
Avi Kivity9c8cba32007-11-22 11:42:59 +02001810 struct vcpu_vmx *vmx = to_vmx(vcpu);
1811
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001812 if (vcpu->arch.rmode.active) {
Avi Kivity9c8cba32007-11-22 11:42:59 +02001813 vmx->rmode.irq.pending = true;
1814 vmx->rmode.irq.vector = irq;
1815 vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
Avi Kivity9c5623e2007-11-08 18:19:20 +02001816 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1817 irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
1818 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
Avi Kivity9c8cba32007-11-22 11:42:59 +02001819 vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
Eddie Dong85f455f2007-07-06 12:20:49 +03001820 return;
1821 }
1822 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1823 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1824}
1825
Avi Kivity6aa8b732006-12-10 02:21:36 -08001826static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1827{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001828 int word_index = __ffs(vcpu->arch.irq_summary);
1829 int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001830 int irq = word_index * BITS_PER_LONG + bit_index;
1831
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001832 clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
1833 if (!vcpu->arch.irq_pending[word_index])
1834 clear_bit(word_index, &vcpu->arch.irq_summary);
Eddie Dong85f455f2007-07-06 12:20:49 +03001835 vmx_inject_irq(vcpu, irq);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001836}
1837
Dor Laorc1150d82007-01-05 16:36:24 -08001838
1839static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1840 struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001841{
Dor Laorc1150d82007-01-05 16:36:24 -08001842 u32 cpu_based_vm_exec_control;
1843
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001844 vcpu->arch.interrupt_window_open =
Dor Laorc1150d82007-01-05 16:36:24 -08001845 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1846 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1847
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001848 if (vcpu->arch.interrupt_window_open &&
1849 vcpu->arch.irq_summary &&
Dor Laorc1150d82007-01-05 16:36:24 -08001850 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001851 /*
Dor Laorc1150d82007-01-05 16:36:24 -08001852		 * Interrupts are enabled and not blocked by sti or mov ss: inject now.
Avi Kivity6aa8b732006-12-10 02:21:36 -08001853 */
1854 kvm_do_inject_irq(vcpu);
Dor Laorc1150d82007-01-05 16:36:24 -08001855
1856 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001857 if (!vcpu->arch.interrupt_window_open &&
1858 (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001859 /*
1860 * Interrupts blocked. Wait for unblock.
1861 */
Dor Laorc1150d82007-01-05 16:36:24 -08001862 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
1863 else
1864 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
1865 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001866}
1867
Izik Eiduscbc94022007-10-25 00:29:55 +02001868static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
1869{
1870 int ret;
1871 struct kvm_userspace_memory_region tss_mem = {
1872 .slot = 8,
1873 .guest_phys_addr = addr,
1874 .memory_size = PAGE_SIZE * 3,
1875 .flags = 0,
1876 };
1877
1878 ret = kvm_set_memory_region(kvm, &tss_mem, 0);
1879 if (ret)
1880 return ret;
Zhang Xiantaobfc6d222007-12-14 10:20:16 +08001881 kvm->arch.tss_addr = addr;
Izik Eiduscbc94022007-10-25 00:29:55 +02001882 return 0;
1883}
1884
Avi Kivity6aa8b732006-12-10 02:21:36 -08001885static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
1886{
1887 struct kvm_guest_debug *dbg = &vcpu->guest_debug;
1888
1889 set_debugreg(dbg->bp[0], 0);
1890 set_debugreg(dbg->bp[1], 1);
1891 set_debugreg(dbg->bp[2], 2);
1892 set_debugreg(dbg->bp[3], 3);
1893
1894 if (dbg->singlestep) {
1895 unsigned long flags;
1896
1897 flags = vmcs_readl(GUEST_RFLAGS);
1898 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
1899 vmcs_writel(GUEST_RFLAGS, flags);
1900 }
1901}
1902
1903static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1904 int vec, u32 err_code)
1905{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001906 if (!vcpu->arch.rmode.active)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001907 return 0;
1908
Nitin A Kambleb3f37702007-05-17 15:50:34 +03001909 /*
 1910	 * An instruction with the address-size override prefix (opcode 0x67)
 1911	 * causes a #SS fault with error code 0 in VM86 mode.
1912 */
1913 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
Laurent Vivier34273182007-09-18 11:27:37 +02001914 if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001915 return 1;
1916 return 0;
1917}
1918
1919static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1920{
Avi Kivity1155f762007-11-22 11:30:47 +02001921 struct vcpu_vmx *vmx = to_vmx(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001922 u32 intr_info, error_code;
1923 unsigned long cr2, rip;
1924 u32 vect_info;
1925 enum emulation_result er;
1926
Avi Kivity1155f762007-11-22 11:30:47 +02001927 vect_info = vmx->idt_vectoring_info;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001928 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1929
1930 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
Mike Dayd77c26f2007-10-08 09:02:08 -04001931 !is_page_fault(intr_info))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001932 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001933 "intr info 0x%x\n", __func__, vect_info, intr_info);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001934
Eddie Dong85f455f2007-07-06 12:20:49 +03001935 if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001936 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001937 set_bit(irq, vcpu->arch.irq_pending);
1938 set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001939 }
1940
Avi Kivity1b6269d2007-10-09 12:12:19 +02001941 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
1942 return 1; /* already handled by vmx_vcpu_run() */
Anthony Liguori2ab455c2007-04-27 09:29:49 +03001943
1944 if (is_no_device(intr_info)) {
Avi Kivity5fd86fc2007-05-02 20:40:00 +03001945 vmx_fpu_activate(vcpu);
Anthony Liguori2ab455c2007-04-27 09:29:49 +03001946 return 1;
1947 }
1948
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001949 if (is_invalid_opcode(intr_info)) {
Sheng Yang571008d2008-01-02 14:49:22 +08001950 er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001951 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02001952 kvm_queue_exception(vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001953 return 1;
1954 }
1955
Avi Kivity6aa8b732006-12-10 02:21:36 -08001956 error_code = 0;
1957 rip = vmcs_readl(GUEST_RIP);
Ryan Harper2e113842008-02-11 10:26:38 -06001958 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001959 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
1960 if (is_page_fault(intr_info)) {
1961 cr2 = vmcs_readl(EXIT_QUALIFICATION);
Avi Kivity30677142007-10-28 18:48:59 +02001962 return kvm_mmu_page_fault(vcpu, cr2, error_code);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001963 }
1964
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001965 if (vcpu->arch.rmode.active &&
Avi Kivity6aa8b732006-12-10 02:21:36 -08001966 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
Avi Kivity72d6e5a2007-06-05 16:15:51 +03001967 error_code)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001968 if (vcpu->arch.halt_request) {
1969 vcpu->arch.halt_request = 0;
Avi Kivity72d6e5a2007-06-05 16:15:51 +03001970 return kvm_emulate_halt(vcpu);
1971 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001972 return 1;
Avi Kivity72d6e5a2007-06-05 16:15:51 +03001973 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001974
Mike Dayd77c26f2007-10-08 09:02:08 -04001975 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
1976 (INTR_TYPE_EXCEPTION | 1)) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001977 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1978 return 0;
1979 }
1980 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
1981 kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
1982 kvm_run->ex.error_code = error_code;
1983 return 0;
1984}
1985
1986static int handle_external_interrupt(struct kvm_vcpu *vcpu,
1987 struct kvm_run *kvm_run)
1988{
Avi Kivity1165f5f2007-04-19 17:27:43 +03001989 ++vcpu->stat.irq_exits;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001990 return 1;
1991}
1992
Avi Kivity988ad742007-02-12 00:54:36 -08001993static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1994{
1995 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1996 return 0;
1997}
Avi Kivity6aa8b732006-12-10 02:21:36 -08001998
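/*
 * Decode the I/O-instruction exit qualification: bits 2:0 hold size-1,
 * bit 3 the direction (in), bit 4 the string flag, bit 5 the rep
 * prefix, and bits 31:16 the port number.
 */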
Avi Kivity6aa8b732006-12-10 02:21:36 -08001999static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2000{
He, Qingbfdaab02007-09-12 14:18:28 +08002001 unsigned long exit_qualification;
Avi Kivity039576c2007-03-20 12:46:50 +02002002 int size, down, in, string, rep;
2003 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002004
Avi Kivity1165f5f2007-04-19 17:27:43 +03002005 ++vcpu->stat.io_exits;
He, Qingbfdaab02007-09-12 14:18:28 +08002006 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
Avi Kivity039576c2007-03-20 12:46:50 +02002007 string = (exit_qualification & 16) != 0;
Laurent Viviere70669a2007-08-05 10:36:40 +03002008
2009 if (string) {
Laurent Vivier34273182007-09-18 11:27:37 +02002010 if (emulate_instruction(vcpu,
2011 kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
Laurent Viviere70669a2007-08-05 10:36:40 +03002012 return 0;
2013 return 1;
2014 }
2015
2016 size = (exit_qualification & 7) + 1;
2017 in = (exit_qualification & 8) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02002018 down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02002019 rep = (exit_qualification & 32) != 0;
2020 port = exit_qualification >> 16;
Laurent Viviere70669a2007-08-05 10:36:40 +03002021
Laurent Vivier3090dd72007-08-05 10:43:32 +03002022 return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002023}
2024
Ingo Molnar102d8322007-02-19 14:37:47 +02002025static void
2026vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2027{
2028 /*
2029 * Patch in the VMCALL instruction:
2030 */
2031 hypercall[0] = 0x0f;
2032 hypercall[1] = 0x01;
2033 hypercall[2] = 0xc1;
Ingo Molnar102d8322007-02-19 14:37:47 +02002034}
2035
Avi Kivity6aa8b732006-12-10 02:21:36 -08002036static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2037{
He, Qingbfdaab02007-09-12 14:18:28 +08002038 unsigned long exit_qualification;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002039 int cr;
2040 int reg;
2041
He, Qingbfdaab02007-09-12 14:18:28 +08002042 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002043 cr = exit_qualification & 15;
2044 reg = (exit_qualification >> 8) & 15;
2045 switch ((exit_qualification >> 4) & 3) {
2046 case 0: /* mov to cr */
2047 switch (cr) {
2048 case 0:
2049 vcpu_load_rsp_rip(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02002050 kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002051 skip_emulated_instruction(vcpu);
2052 return 1;
2053 case 3:
2054 vcpu_load_rsp_rip(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02002055 kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002056 skip_emulated_instruction(vcpu);
2057 return 1;
2058 case 4:
2059 vcpu_load_rsp_rip(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02002060 kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002061 skip_emulated_instruction(vcpu);
2062 return 1;
2063 case 8:
2064 vcpu_load_rsp_rip(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02002065 kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002066 skip_emulated_instruction(vcpu);
Avi Kivitye5314062007-12-06 16:32:45 +02002067 if (irqchip_in_kernel(vcpu->kvm))
2068 return 1;
Yang, Sheng253abde2007-08-16 13:01:00 +03002069 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2070 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002071		}
2072 break;
Anthony Liguori25c4c272007-04-27 09:29:21 +03002073 case 2: /* clts */
2074 vcpu_load_rsp_rip(vcpu);
Avi Kivity5fd86fc2007-05-02 20:40:00 +03002075 vmx_fpu_deactivate(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002076 vcpu->arch.cr0 &= ~X86_CR0_TS;
2077 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
Avi Kivity5fd86fc2007-05-02 20:40:00 +03002078 vmx_fpu_activate(vcpu);
Anthony Liguori25c4c272007-04-27 09:29:21 +03002079 skip_emulated_instruction(vcpu);
2080 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002081	case 1: /* mov from cr */
2082 switch (cr) {
2083 case 3:
2084 vcpu_load_rsp_rip(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002085 vcpu->arch.regs[reg] = vcpu->arch.cr3;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002086 vcpu_put_rsp_rip(vcpu);
2087 skip_emulated_instruction(vcpu);
2088 return 1;
2089 case 8:
Avi Kivity6aa8b732006-12-10 02:21:36 -08002090 vcpu_load_rsp_rip(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02002091 vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002092 vcpu_put_rsp_rip(vcpu);
2093 skip_emulated_instruction(vcpu);
2094 return 1;
2095 }
2096 break;
2097 case 3: /* lmsw */
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02002098 kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002099
2100 skip_emulated_instruction(vcpu);
2101 return 1;
2102 default:
2103 break;
2104 }
2105 kvm_run->exit_reason = 0;
Rusty Russellf0242472007-08-01 10:48:02 +10002106 pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
Avi Kivity6aa8b732006-12-10 02:21:36 -08002107 (int)(exit_qualification >> 4) & 3, cr);
2108 return 0;
2109}
2110
2111static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2112{
He, Qingbfdaab02007-09-12 14:18:28 +08002113 unsigned long exit_qualification;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002114 unsigned long val;
2115 int dr, reg;
2116
2117 /*
2118 * FIXME: this code assumes the host is debugging the guest.
2119 * need to deal with guest debugging itself too.
2120 */
He, Qingbfdaab02007-09-12 14:18:28 +08002121 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002122 dr = exit_qualification & 7;
2123 reg = (exit_qualification >> 8) & 15;
2124 vcpu_load_rsp_rip(vcpu);
2125 if (exit_qualification & 16) {
2126 /* mov from dr */
2127 switch (dr) {
2128 case 6:
2129 val = 0xffff0ff0;
2130 break;
2131 case 7:
2132 val = 0x400;
2133 break;
2134 default:
2135 val = 0;
2136 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002137 vcpu->arch.regs[reg] = val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002138 } else {
2139 /* mov to dr */
2140 }
2141 vcpu_put_rsp_rip(vcpu);
2142 skip_emulated_instruction(vcpu);
2143 return 1;
2144}
2145
2146static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2147{
Avi Kivity06465c52007-02-28 20:46:53 +02002148 kvm_emulate_cpuid(vcpu);
2149 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002150}
2151
2152static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2153{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002154 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
Avi Kivity6aa8b732006-12-10 02:21:36 -08002155 u64 data;
2156
2157 if (vmx_get_msr(vcpu, ecx, &data)) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02002158 kvm_inject_gp(vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002159 return 1;
2160 }
2161
2162 /* FIXME: handling of bits 32:63 of rax, rdx */
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002163 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
2164 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002165 skip_emulated_instruction(vcpu);
2166 return 1;
2167}
2168
2169static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2170{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002171 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
2172 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
2173 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002174
2175 if (vmx_set_msr(vcpu, ecx, data) != 0) {
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02002176 kvm_inject_gp(vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002177 return 1;
2178 }
2179
2180 skip_emulated_instruction(vcpu);
2181 return 1;
2182}
2183
Yang, Sheng6e5d8652007-09-12 18:03:11 +08002184static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
2185 struct kvm_run *kvm_run)
2186{
2187 return 1;
2188}
2189
Avi Kivity6aa8b732006-12-10 02:21:36 -08002190static int handle_interrupt_window(struct kvm_vcpu *vcpu,
2191 struct kvm_run *kvm_run)
2192{
Eddie Dong85f455f2007-07-06 12:20:49 +03002193 u32 cpu_based_vm_exec_control;
2194
2195 /* clear pending irq */
2196 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2197 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2198 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
Dor Laorc1150d82007-01-05 16:36:24 -08002199 /*
 2200	 * If user space is waiting to inject interrupts, exit as soon as
 2201	 * possible.
2202 */
2203 if (kvm_run->request_interrupt_window &&
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002204 !vcpu->arch.irq_summary) {
Dor Laorc1150d82007-01-05 16:36:24 -08002205 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
Avi Kivity1165f5f2007-04-19 17:27:43 +03002206 ++vcpu->stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08002207 return 0;
2208 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002209 return 1;
2210}
2211
2212static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2213{
2214 skip_emulated_instruction(vcpu);
Avi Kivityd3bef152007-06-05 15:53:05 +03002215 return kvm_emulate_halt(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002216}
2217
Ingo Molnarc21415e2007-02-19 14:37:47 +02002218static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2219{
Dor Laor510043d2007-02-19 18:25:43 +02002220 skip_emulated_instruction(vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002221 kvm_emulate_hypercall(vcpu);
2222 return 1;
Ingo Molnarc21415e2007-02-19 14:37:47 +02002223}
2224
Eddie Donge5edaa02007-11-11 12:28:35 +02002225static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2226{
2227 skip_emulated_instruction(vcpu);
2228 /* TODO: Add support for VT-d/pass-through device */
2229 return 1;
2230}
2231
Sheng Yangf78e0e22007-10-29 09:40:42 +08002232static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2233{
2234 u64 exit_qualification;
2235 enum emulation_result er;
2236 unsigned long offset;
2237
2238 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
2239 offset = exit_qualification & 0xffful;
2240
2241 er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
2242
2243 if (er != EMULATE_DONE) {
2244 printk(KERN_ERR
 2245		       "Failed to handle apic access vmexit! Offset is 0x%lx\n",
2246 offset);
2247 return -ENOTSUPP;
2248 }
2249 return 1;
2250}
2251
Izik Eidus37817f22008-03-24 23:14:53 +02002252static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2253{
2254 unsigned long exit_qualification;
2255 u16 tss_selector;
2256 int reason;
2257
2258 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2259
2260 reason = (u32)exit_qualification >> 30;
2261 tss_selector = exit_qualification;
2262
2263 return kvm_task_switch(vcpu, tss_selector, reason);
2264}
2265
Avi Kivity6aa8b732006-12-10 02:21:36 -08002266/*
2267 * The exit handlers return 1 if the exit was handled fully and guest execution
2268 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
2269 * to be done to userspace and return 0.
2270 */
2271static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
2272 struct kvm_run *kvm_run) = {
2273 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
2274 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
Avi Kivity988ad742007-02-12 00:54:36 -08002275 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002276 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002277 [EXIT_REASON_CR_ACCESS] = handle_cr,
2278 [EXIT_REASON_DR_ACCESS] = handle_dr,
2279 [EXIT_REASON_CPUID] = handle_cpuid,
2280 [EXIT_REASON_MSR_READ] = handle_rdmsr,
2281 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
2282 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
2283 [EXIT_REASON_HLT] = handle_halt,
Ingo Molnarc21415e2007-02-19 14:37:47 +02002284 [EXIT_REASON_VMCALL] = handle_vmcall,
Sheng Yangf78e0e22007-10-29 09:40:42 +08002285 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
2286 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
Eddie Donge5edaa02007-11-11 12:28:35 +02002287 [EXIT_REASON_WBINVD] = handle_wbinvd,
Izik Eidus37817f22008-03-24 23:14:53 +02002288 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002289};
2290
2291static const int kvm_vmx_max_exit_handlers =
Robert P. J. Day50a34852007-06-03 13:35:29 -04002292 ARRAY_SIZE(kvm_vmx_exit_handlers);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002293
2294/*
2295 * The guest has exited. See if we can fix it or if we need userspace
2296 * assistance.
2297 */
2298static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2299{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002300 u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
Avi Kivity29bd8a72007-09-10 17:27:03 +03002301 struct vcpu_vmx *vmx = to_vmx(vcpu);
Avi Kivity1155f762007-11-22 11:30:47 +02002302 u32 vectoring_info = vmx->idt_vectoring_info;
Avi Kivity29bd8a72007-09-10 17:27:03 +03002303
2304 if (unlikely(vmx->fail)) {
2305 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2306 kvm_run->fail_entry.hardware_entry_failure_reason
2307 = vmcs_read32(VM_INSTRUCTION_ERROR);
2308 return 0;
2309 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002310
Mike Dayd77c26f2007-10-08 09:02:08 -04002311 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
2312 exit_reason != EXIT_REASON_EXCEPTION_NMI)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002313 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
Harvey Harrisonb8688d52008-03-03 12:59:56 -08002314 "exit reason is 0x%x\n", __func__, exit_reason);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002315 if (exit_reason < kvm_vmx_max_exit_handlers
2316 && kvm_vmx_exit_handlers[exit_reason])
2317 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
2318 else {
2319 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
2320 kvm_run->hw.hardware_exit_reason = exit_reason;
2321 }
2322 return 0;
2323}
2324
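/*
 * With the TPR shadow enabled, program TPR_THRESHOLD so the guest exits
 * as soon as its task priority drops below that of the highest pending
 * interrupt, giving KVM a chance to inject it.
 */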
Yang, Sheng6e5d8652007-09-12 18:03:11 +08002325static void update_tpr_threshold(struct kvm_vcpu *vcpu)
2326{
2327 int max_irr, tpr;
2328
2329 if (!vm_need_tpr_shadow(vcpu->kvm))
2330 return;
2331
2332 if (!kvm_lapic_enabled(vcpu) ||
2333 ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
2334 vmcs_write32(TPR_THRESHOLD, 0);
2335 return;
2336 }
2337
2338 tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
2339 vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
2340}
2341
Eddie Dong85f455f2007-07-06 12:20:49 +03002342static void enable_irq_window(struct kvm_vcpu *vcpu)
2343{
2344 u32 cpu_based_vm_exec_control;
2345
2346 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2347 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2348 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2349}
2350
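/*
 * Decide what to inject on the next entry: keep any event the CPU has
 * already latched, otherwise replay whatever IDT-vectoring information
 * was left over from the last exit, otherwise inject a pending external
 * interrupt if the window is open, or ask for an interrupt-window exit.
 */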
2351static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2352{
Avi Kivity1155f762007-11-22 11:30:47 +02002353 struct vcpu_vmx *vmx = to_vmx(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03002354 u32 idtv_info_field, intr_info_field;
2355 int has_ext_irq, interrupt_window_open;
Eddie Dong1b9778d2007-09-03 16:56:58 +03002356 int vector;
Eddie Dong85f455f2007-07-06 12:20:49 +03002357
Yang, Sheng6e5d8652007-09-12 18:03:11 +08002358 update_tpr_threshold(vcpu);
2359
Eddie Dong85f455f2007-07-06 12:20:49 +03002360 has_ext_irq = kvm_cpu_has_interrupt(vcpu);
2361 intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
Avi Kivity1155f762007-11-22 11:30:47 +02002362 idtv_info_field = vmx->idt_vectoring_info;
Eddie Dong85f455f2007-07-06 12:20:49 +03002363 if (intr_info_field & INTR_INFO_VALID_MASK) {
2364 if (idtv_info_field & INTR_INFO_VALID_MASK) {
2365 /* TODO: fault when IDT_Vectoring */
Ryan Harper9584bf22007-12-13 10:21:10 -06002366 if (printk_ratelimit())
2367 printk(KERN_ERR "Fault when IDT_Vectoring\n");
Eddie Dong85f455f2007-07-06 12:20:49 +03002368 }
2369 if (has_ext_irq)
2370 enable_irq_window(vcpu);
2371 return;
2372 }
2373 if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
Avi Kivity9c8cba32007-11-22 11:42:59 +02002374 if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
2375 == INTR_TYPE_EXT_INTR
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002376 && vcpu->arch.rmode.active) {
Avi Kivity9c8cba32007-11-22 11:42:59 +02002377 u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
2378
2379 vmx_inject_irq(vcpu, vect);
2380 if (unlikely(has_ext_irq))
2381 enable_irq_window(vcpu);
2382 return;
2383 }
2384
Eddie Dong85f455f2007-07-06 12:20:49 +03002385 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
2386 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2387 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
2388
Ryan Harper2e113842008-02-11 10:26:38 -06002389 if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK))
Eddie Dong85f455f2007-07-06 12:20:49 +03002390 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2391 vmcs_read32(IDT_VECTORING_ERROR_CODE));
2392 if (unlikely(has_ext_irq))
2393 enable_irq_window(vcpu);
2394 return;
2395 }
2396 if (!has_ext_irq)
2397 return;
2398 interrupt_window_open =
2399 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2400 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
Eddie Dong1b9778d2007-09-03 16:56:58 +03002401 if (interrupt_window_open) {
2402 vector = kvm_cpu_get_interrupt(vcpu);
2403 vmx_inject_irq(vcpu, vector);
2404 kvm_timer_intr_post(vcpu, vector);
2405 } else
Eddie Dong85f455f2007-07-06 12:20:49 +03002406 enable_irq_window(vcpu);
2407}
2408
Avi Kivity9c8cba32007-11-22 11:42:59 +02002409/*
2410 * Failure to inject an interrupt should give us the information
2411 * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs
2412 * when fetching the interrupt redirection bitmap in the real-mode
2413 * tss, this doesn't happen. So we do it ourselves.
2414 */
2415static void fixup_rmode_irq(struct vcpu_vmx *vmx)
2416{
2417 vmx->rmode.irq.pending = 0;
2418 if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
2419 return;
2420 vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
2421 if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
2422 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
2423 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
2424 return;
2425 }
2426 vmx->idt_vectoring_info =
2427 VECTORING_INFO_VALID_MASK
2428 | INTR_TYPE_EXT_INTR
2429 | vmx->rmode.irq.vector;
2430}
2431
Avi Kivity04d2cc72007-09-10 18:10:54 +03002432static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002433{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002434 struct vcpu_vmx *vmx = to_vmx(vcpu);
Avi Kivity1b6269d2007-10-09 12:12:19 +02002435 u32 intr_info;
Avi Kivitye6adf282007-04-30 16:07:54 +03002436
2437 /*
2438 * Loading guest fpu may have cleared host cr0.ts
2439 */
2440 vmcs_writel(HOST_CR0, read_cr0());
2441
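	/*
	 * The guest general-purpose registers live in vcpu->arch.regs; the
	 * asm below stashes the host stack pointer in HOST_RSP, loads the
	 * guest registers, launches or resumes the guest, and saves the
	 * registers back on vmexit.
	 */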
Mike Dayd77c26f2007-10-08 09:02:08 -04002442 asm(
Avi Kivity6aa8b732006-12-10 02:21:36 -08002443 /* Store host registers */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002444#ifdef CONFIG_X86_64
Laurent Vivierc2036302007-10-25 14:18:52 +02002445 "push %%rdx; push %%rbp;"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002446 "push %%rcx \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002447#else
Laurent Vivierff593e52007-10-25 14:18:55 +02002448 "push %%edx; push %%ebp;"
2449 "push %%ecx \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002450#endif
Laurent Vivierc2036302007-10-25 14:18:52 +02002451 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002452		/* Check if vmlaunch or vmresume is needed */
Avi Kivitye08aa782007-11-15 18:06:18 +02002453 "cmpl $0, %c[launched](%0) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002454 /* Load guest registers. Don't clobber flags. */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002455#ifdef CONFIG_X86_64
Avi Kivitye08aa782007-11-15 18:06:18 +02002456 "mov %c[cr2](%0), %%rax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002457 "mov %%rax, %%cr2 \n\t"
Avi Kivitye08aa782007-11-15 18:06:18 +02002458 "mov %c[rax](%0), %%rax \n\t"
2459 "mov %c[rbx](%0), %%rbx \n\t"
2460 "mov %c[rdx](%0), %%rdx \n\t"
2461 "mov %c[rsi](%0), %%rsi \n\t"
2462 "mov %c[rdi](%0), %%rdi \n\t"
2463 "mov %c[rbp](%0), %%rbp \n\t"
2464 "mov %c[r8](%0), %%r8 \n\t"
2465 "mov %c[r9](%0), %%r9 \n\t"
2466 "mov %c[r10](%0), %%r10 \n\t"
2467 "mov %c[r11](%0), %%r11 \n\t"
2468 "mov %c[r12](%0), %%r12 \n\t"
2469 "mov %c[r13](%0), %%r13 \n\t"
2470 "mov %c[r14](%0), %%r14 \n\t"
2471 "mov %c[r15](%0), %%r15 \n\t"
2472 "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */
Avi Kivity6aa8b732006-12-10 02:21:36 -08002473#else
Avi Kivitye08aa782007-11-15 18:06:18 +02002474 "mov %c[cr2](%0), %%eax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002475 "mov %%eax, %%cr2 \n\t"
Avi Kivitye08aa782007-11-15 18:06:18 +02002476 "mov %c[rax](%0), %%eax \n\t"
2477 "mov %c[rbx](%0), %%ebx \n\t"
2478 "mov %c[rdx](%0), %%edx \n\t"
2479 "mov %c[rsi](%0), %%esi \n\t"
2480 "mov %c[rdi](%0), %%edi \n\t"
2481 "mov %c[rbp](%0), %%ebp \n\t"
2482 "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */
Avi Kivity6aa8b732006-12-10 02:21:36 -08002483#endif
2484 /* Enter guest mode */
Avi Kivitycd2276a2007-05-14 20:41:13 +03002485 "jne .Llaunched \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002486 ASM_VMX_VMLAUNCH "\n\t"
Avi Kivitycd2276a2007-05-14 20:41:13 +03002487 "jmp .Lkvm_vmx_return \n\t"
2488 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
2489 ".Lkvm_vmx_return: "
Avi Kivity6aa8b732006-12-10 02:21:36 -08002490 /* Save guest registers, load host registers, keep flags */
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002491#ifdef CONFIG_X86_64
Avi Kivitye08aa782007-11-15 18:06:18 +02002492 "xchg %0, (%%rsp) \n\t"
2493 "mov %%rax, %c[rax](%0) \n\t"
2494 "mov %%rbx, %c[rbx](%0) \n\t"
2495 "pushq (%%rsp); popq %c[rcx](%0) \n\t"
2496 "mov %%rdx, %c[rdx](%0) \n\t"
2497 "mov %%rsi, %c[rsi](%0) \n\t"
2498 "mov %%rdi, %c[rdi](%0) \n\t"
2499 "mov %%rbp, %c[rbp](%0) \n\t"
2500 "mov %%r8, %c[r8](%0) \n\t"
2501 "mov %%r9, %c[r9](%0) \n\t"
2502 "mov %%r10, %c[r10](%0) \n\t"
2503 "mov %%r11, %c[r11](%0) \n\t"
2504 "mov %%r12, %c[r12](%0) \n\t"
2505 "mov %%r13, %c[r13](%0) \n\t"
2506 "mov %%r14, %c[r14](%0) \n\t"
2507 "mov %%r15, %c[r15](%0) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002508 "mov %%cr2, %%rax \n\t"
Avi Kivitye08aa782007-11-15 18:06:18 +02002509 "mov %%rax, %c[cr2](%0) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002510
Avi Kivitye08aa782007-11-15 18:06:18 +02002511 "pop %%rbp; pop %%rbp; pop %%rdx \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002512#else
Avi Kivitye08aa782007-11-15 18:06:18 +02002513 "xchg %0, (%%esp) \n\t"
2514 "mov %%eax, %c[rax](%0) \n\t"
2515 "mov %%ebx, %c[rbx](%0) \n\t"
2516 "pushl (%%esp); popl %c[rcx](%0) \n\t"
2517 "mov %%edx, %c[rdx](%0) \n\t"
2518 "mov %%esi, %c[rsi](%0) \n\t"
2519 "mov %%edi, %c[rdi](%0) \n\t"
2520 "mov %%ebp, %c[rbp](%0) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002521 "mov %%cr2, %%eax \n\t"
Avi Kivitye08aa782007-11-15 18:06:18 +02002522 "mov %%eax, %c[cr2](%0) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002523
Avi Kivitye08aa782007-11-15 18:06:18 +02002524 "pop %%ebp; pop %%ebp; pop %%edx \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08002525#endif
Avi Kivitye08aa782007-11-15 18:06:18 +02002526 "setbe %c[fail](%0) \n\t"
2527 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
2528 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
2529 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002530 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
2531 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
2532 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
2533 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
2534 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
2535 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
2536 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002537#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002538 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
2539 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
2540 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
2541 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
2542 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
2543 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
2544 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
2545 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
Avi Kivity6aa8b732006-12-10 02:21:36 -08002546#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002547 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
Laurent Vivierc2036302007-10-25 14:18:52 +02002548 : "cc", "memory"
2549#ifdef CONFIG_X86_64
2550 , "rbx", "rdi", "rsi"
2551 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
Laurent Vivierff593e52007-10-25 14:18:55 +02002552#else
 2553	      , "ebx", "edi", "esi"
Laurent Vivierc2036302007-10-25 14:18:52 +02002554#endif
2555 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08002556
Avi Kivity1155f762007-11-22 11:30:47 +02002557 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
Avi Kivity9c8cba32007-11-22 11:42:59 +02002558 if (vmx->rmode.irq.pending)
2559 fixup_rmode_irq(vmx);
Avi Kivity1155f762007-11-22 11:30:47 +02002560
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002561 vcpu->arch.interrupt_window_open =
Mike Dayd77c26f2007-10-08 09:02:08 -04002562 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002563
Mike Dayd77c26f2007-10-08 09:02:08 -04002564 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
Avi Kivity15ad7142007-07-11 18:17:21 +03002565 vmx->launched = 1;
Avi Kivity1b6269d2007-10-09 12:12:19 +02002566
2567 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2568
2569 /* We need to handle NMIs before interrupts are enabled */
2570 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
2571 asm("int $2");
Avi Kivity6aa8b732006-12-10 02:21:36 -08002572}
2573
Avi Kivity6aa8b732006-12-10 02:21:36 -08002574static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2575{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002576 struct vcpu_vmx *vmx = to_vmx(vcpu);
2577
2578 if (vmx->vmcs) {
Rusty Russell8b9cf982007-07-30 16:31:43 +10002579 on_each_cpu(__vcpu_clear, vmx, 0, 1);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002580 free_vmcs(vmx->vmcs);
2581 vmx->vmcs = NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002582 }
2583}
2584
2585static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2586{
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002587 struct vcpu_vmx *vmx = to_vmx(vcpu);
2588
Sheng Yang2384d2b2008-01-17 15:14:33 +08002589 spin_lock(&vmx_vpid_lock);
2590 if (vmx->vpid != 0)
2591 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
2592 spin_unlock(&vmx_vpid_lock);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002593 vmx_free_vmcs(vcpu);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002594 kfree(vmx->host_msrs);
2595 kfree(vmx->guest_msrs);
2596 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10002597 kmem_cache_free(kvm_vcpu_cache, vmx);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002598}
2599
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002600static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002601{
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002602 int err;
Rusty Russellc16f8622007-07-30 21:12:19 +10002603 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Avi Kivity15ad7142007-07-11 18:17:21 +03002604 int cpu;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002605
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002606 if (!vmx)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002607 return ERR_PTR(-ENOMEM);
2608
Sheng Yang2384d2b2008-01-17 15:14:33 +08002609 allocate_vpid(vmx);
2610
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002611 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
2612 if (err)
2613 goto free_vcpu;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002614
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002615 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002616 if (!vmx->guest_msrs) {
2617 err = -ENOMEM;
2618 goto uninit_vcpu;
2619 }
Ingo Molnar965b58a2007-01-05 16:36:23 -08002620
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002621 vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2622 if (!vmx->host_msrs)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002623 goto free_guest_msrs;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002624
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002625 vmx->vmcs = alloc_vmcs();
2626 if (!vmx->vmcs)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002627 goto free_msrs;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002628
2629 vmcs_clear(vmx->vmcs);
2630
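	/*
	 * VMCS fields can only be written while the VMCS is loaded on the
	 * current CPU, so disable preemption around the initial setup.
	 */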
Avi Kivity15ad7142007-07-11 18:17:21 +03002631 cpu = get_cpu();
2632 vmx_vcpu_load(&vmx->vcpu, cpu);
Rusty Russell8b9cf982007-07-30 16:31:43 +10002633 err = vmx_vcpu_setup(vmx);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002634 vmx_vcpu_put(&vmx->vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03002635 put_cpu();
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002636 if (err)
2637 goto free_vmcs;
	if (vm_need_virtualize_apic_accesses(kvm)) {
		/* Treat failure to set up the APIC access page as -ENOMEM. */
		if (alloc_apic_access_page(kvm) != 0) {
			err = -ENOMEM;
			goto free_vmcs;
		}
	}
Ingo Molnar965b58a2007-01-05 16:36:23 -08002641
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002642 return &vmx->vcpu;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002643
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002644free_vmcs:
2645 free_vmcs(vmx->vmcs);
2646free_msrs:
2647 kfree(vmx->host_msrs);
2648free_guest_msrs:
2649 kfree(vmx->guest_msrs);
2650uninit_vcpu:
2651 kvm_vcpu_uninit(&vmx->vcpu);
2652free_vcpu:
Rusty Russella4770342007-08-01 14:46:11 +10002653 kmem_cache_free(kvm_vcpu_cache, vmx);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002654 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002655}
2656
Yang, Sheng002c7f72007-07-31 14:23:01 +03002657static void __init vmx_check_processor_compat(void *rtn)
2658{
2659 struct vmcs_config vmcs_conf;
2660
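	/* Every CPU must support the VMCS configuration chosen at setup time. */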
2661 *(int *)rtn = 0;
2662 if (setup_vmcs_config(&vmcs_conf) < 0)
2663 *(int *)rtn = -EIO;
2664 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
2665 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
2666 smp_processor_id());
2667 *(int *)rtn = -EIO;
2668 }
2669}
2670
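/* VMX implementations of the operations the generic x86 KVM code relies on. */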
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002671static struct kvm_x86_ops vmx_x86_ops = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08002672 .cpu_has_kvm_support = cpu_has_kvm_support,
2673 .disabled_by_bios = vmx_disabled_by_bios,
2674 .hardware_setup = hardware_setup,
2675 .hardware_unsetup = hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03002676 .check_processor_compatibility = vmx_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002677 .hardware_enable = hardware_enable,
2678 .hardware_disable = hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02002679 .cpu_has_accelerated_tpr = cpu_has_vmx_virtualize_apic_accesses,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002680
2681 .vcpu_create = vmx_create_vcpu,
2682 .vcpu_free = vmx_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03002683 .vcpu_reset = vmx_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002684
Avi Kivity04d2cc72007-09-10 18:10:54 +03002685 .prepare_guest_switch = vmx_save_host_state,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002686 .vcpu_load = vmx_vcpu_load,
2687 .vcpu_put = vmx_vcpu_put,
Avi Kivity774c47f2007-02-12 00:54:47 -08002688 .vcpu_decache = vmx_vcpu_decache,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002689
2690 .set_guest_debug = set_guest_debug,
Avi Kivity04d2cc72007-09-10 18:10:54 +03002691 .guest_debug_pre = kvm_guest_debug_pre,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002692 .get_msr = vmx_get_msr,
2693 .set_msr = vmx_set_msr,
2694 .get_segment_base = vmx_get_segment_base,
2695 .get_segment = vmx_get_segment,
2696 .set_segment = vmx_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02002697 .get_cpl = vmx_get_cpl,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002698 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
Anthony Liguori25c4c272007-04-27 09:29:21 +03002699 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002700 .set_cr0 = vmx_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002701 .set_cr3 = vmx_set_cr3,
2702 .set_cr4 = vmx_set_cr4,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002703 .set_efer = vmx_set_efer,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002704 .get_idt = vmx_get_idt,
2705 .set_idt = vmx_set_idt,
2706 .get_gdt = vmx_get_gdt,
2707 .set_gdt = vmx_set_gdt,
2708 .cache_regs = vcpu_load_rsp_rip,
2709 .decache_regs = vcpu_put_rsp_rip,
2710 .get_rflags = vmx_get_rflags,
2711 .set_rflags = vmx_set_rflags,
2712
2713 .tlb_flush = vmx_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002714
Avi Kivity6aa8b732006-12-10 02:21:36 -08002715 .run = vmx_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03002716 .handle_exit = kvm_handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002717 .skip_emulated_instruction = skip_emulated_instruction,
Ingo Molnar102d8322007-02-19 14:37:47 +02002718 .patch_hypercall = vmx_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03002719 .get_irq = vmx_get_irq,
2720 .set_irq = vmx_inject_irq,
Avi Kivity298101d2007-11-25 13:41:11 +02002721 .queue_exception = vmx_queue_exception,
2722 .exception_injected = vmx_exception_injected,
Avi Kivity04d2cc72007-09-10 18:10:54 +03002723 .inject_pending_irq = vmx_intr_assist,
2724 .inject_pending_vectors = do_interrupt_requests,
Izik Eiduscbc94022007-10-25 00:29:55 +02002725
2726 .set_tss_addr = vmx_set_tss_addr,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002727};
2728
2729static int __init vmx_init(void)
2730{
He, Qingfdef3ad2007-04-30 09:45:24 +03002731 void *iova;
2732 int r;
2733
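	/*
	 * The two I/O permission bitmap pages cover ports 0x0000-0x7fff and
	 * 0x8000-0xffff; a set bit forces a VM exit on access to that port.
	 */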
2734 vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2735 if (!vmx_io_bitmap_a)
2736 return -ENOMEM;
2737
2738 vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2739 if (!vmx_io_bitmap_b) {
2740 r = -ENOMEM;
2741 goto out;
2742 }
2743
2744 /*
2745 * Allow direct access to the PC debug port (it is often used for I/O
2746 * delays, but the vmexits simply slow things down).
2747 */
2748 iova = kmap(vmx_io_bitmap_a);
2749 memset(iova, 0xff, PAGE_SIZE);
2750 clear_bit(0x80, iova);
Avi Kivitycd0536d2007-05-08 11:34:07 +03002751 kunmap(vmx_io_bitmap_a);
He, Qingfdef3ad2007-04-30 09:45:24 +03002752
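	/* All ports in the second bitmap (0x8000-0xffff) cause exits. */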
2753 iova = kmap(vmx_io_bitmap_b);
2754 memset(iova, 0xff, PAGE_SIZE);
Avi Kivitycd0536d2007-05-08 11:34:07 +03002755 kunmap(vmx_io_bitmap_b);
He, Qingfdef3ad2007-04-30 09:45:24 +03002756
Sheng Yang2384d2b2008-01-17 15:14:33 +08002757 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
2758
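	/* Register this VMX backend with the generic KVM module. */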
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002759 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
He, Qingfdef3ad2007-04-30 09:45:24 +03002760 if (r)
2761 goto out1;
2762
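	/*
	 * With bypass_guest_pf, shadow ptes that still need KVM's attention
	 * are marked with a present-but-reserved pattern, so the faults they
	 * raise keep causing VM exits while the guest's own page faults do not.
	 */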
Avi Kivityc7addb92007-09-16 18:58:32 +02002763 if (bypass_guest_pf)
2764 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
2765
He, Qingfdef3ad2007-04-30 09:45:24 +03002766 return 0;
2767
2768out1:
2769 __free_page(vmx_io_bitmap_b);
2770out:
2771 __free_page(vmx_io_bitmap_a);
2772 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002773}
2774
2775static void __exit vmx_exit(void)
2776{
He, Qingfdef3ad2007-04-30 09:45:24 +03002777 __free_page(vmx_io_bitmap_b);
2778 __free_page(vmx_io_bitmap_a);
2779
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002780 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08002781}
2782
2783module_init(vmx_init)
2784module_exit(vmx_exit)