/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "vmx.h"
#include "segment_descriptor.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, 0);

static int enable_vpid = 1;
module_param(enable_vpid, bool, 0);

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	int                   launched;
	u8                    fail;
	u32                   idt_vectoring_info;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	int                   msr_offset_efer;
#ifdef CONFIG_X86_64
	int                   msr_offset_kernel_gs_base;
#endif
	struct vmcs          *vmcs;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
		int           guest_efer_loaded;
	} host_state;
	struct {
		struct {
			bool pending;
			u8 vector;
			unsigned rip;
		} irq;
	} rmode;
	int vpid;
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}

static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline int cpu_has_vmx_tpr_shadow(void)
{
	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
}

static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
}

static inline int cpu_has_secondary_exec_ctrls(void)
{
	return (vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return (vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}

static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
		(irqchip_in_kernel(kvm)));
}

static inline int cpu_has_vmx_vpid(void)
{
	return (vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID);
}

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}

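/*
 * Execute INVVPID to flush TLB entries tagged with a virtual-processor
 * identifier.  The instruction takes an extent type in a register and a
 * 128-bit in-memory operand (vpid in bits 15:0, the rest reserved,
 * followed by a guest linear address); on failure it sets CF or ZF,
 * which the "ja" below turns into a ud2 trap.
 */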
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (ASM_VMX_INVVPID
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.arch.host_tsc);
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu == -1)
		return;
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
	vmx->launched = 0;
}

static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

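/*
 * VMCS accessors.  VMREAD/VMWRITE operate on the current VMCS and take
 * an encoded field number rather than a memory offset, so every field
 * access funnels through these wrappers, e.g.
 * vmcs_write32(EXCEPTION_BITMAP, eb).  The natural-width routines below
 * also back the 16-, 32- and 64-bit variants.
 */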
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		      : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	vmcs_writel(field, value);
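	/* empty asm acts as a compiler barrier between the two halves */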
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

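/*
 * Recompute which guest exceptions force a VM exit: bit n set in the
 * exception bitmap makes vector n exit to the host.  #PF and #UD are
 * always intercepted, #NM and #DB conditionally, and in real mode
 * every vector is intercepted so it can be emulated.
 */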
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
		eb |= 1u << 1;
	if (vcpu->arch.rmode.active)
		eb = ~0;
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct segment_descriptor *descs;

	get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}

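/*
 * Decide whether EFER needs to be switched on guest entry at all:
 * writing EFER is expensive, so when guest and host agree on every bit
 * that matters (see ignore_bits below), the wrmsrl is skipped.
 */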
static void load_transition_efer(struct vcpu_vmx *vmx)
{
	int efer_offset = vmx->msr_offset_efer;
	u64 host_efer, guest_efer;
	u64 ignore_bits;

	if (efer_offset < 0)
		return;
	host_efer = vmx->host_msrs[efer_offset].data;
	guest_efer = vmx->guest_msrs[efer_offset].data;
	/*
	 * NX is emulated; LMA and LME are handled by hardware; SCE is
	 * meaningless outside long mode.
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
		return;

	vmx->host_state.guest_efer_loaded = 1;
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	wrmsrl(MSR_EFER, guest_efer);
	vmx->vcpu.stat.efer_reload++;
}

static void reload_host_efer(struct vcpu_vmx *vmx)
{
	if (vmx->host_state.guest_efer_loaded) {
		vmx->host_state.guest_efer_loaded = 0;
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
	}
}

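/*
 * Lazily save host segment and MSR state before entering the guest;
 * the expensive pieces are only restored by vmx_load_host_state() when
 * host_state.loaded indicates they were actually clobbered.
 */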
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	vmx->host_state.gs_sel = read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);

#endif
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_transition_efer(vmx);
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);
	}
	reload_tss();
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	reload_host_efer(vmx);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta;

	if (vcpu->cpu != cpu) {
		vcpu_clear(vmx);
		kvm_migrate_apic_timer(vcpu);
		vpid_sync_vcpu_all(vmx);
	}

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonous.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_load_host_state(to_vmx(vcpu));
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->arch.cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
	vcpu_clear(to_vmx(vcpu));
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->arch.rmode.active)
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;
	u32 interruptibility;

	rip = vmcs_readl(GUEST_RIP);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
	vcpu->arch.interrupt_window_open = 1;
}

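/*
 * Inject an exception through the VM-entry interruption-information
 * field: bits 7:0 hold the vector, bits 10:8 the event type, bit 11
 * indicates an error code is delivered, and bit 31 marks the field
 * valid.
 */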
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     nr | INTR_TYPE_EXCEPTION
		     | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
		     | INTR_INFO_VALID_MASK);
	if (has_error_code)
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
}

static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
#ifdef CONFIG_X86_64
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;
}
#endif

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs;

	vmx_load_host_state(vmx);
	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		int index;

		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_K6_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	vmx->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
#endif
	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
	u64 host_tsc;

	rdtscll(host_tsc);
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct kvm_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		if (vmx->host_state.loaded) {
			reload_host_efer(vmx);
			load_transition_efer(vmx);
		}
		break;
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		guest_write_tsc(data);
		break;
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			msr->data = data;
			if (vmx->host_state.loaded)
				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->arch.regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	unsigned long dr7 = 0x400;
	int old_singlestep;

	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		int i;

		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint */
		}

		vcpu->guest_debug.singlestep = dbg->singlestep;
	} else
		vcpu->guest_debug.singlestep = 0;

	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);
	}

	update_exception_bitmap(vcpu);
	vmcs_writel(GUEST_DR7, dr7);

	return 0;
}

static int vmx_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 idtv_info_field;

	idtv_info_field = vmx->idt_vectoring_info;
	if (idtv_info_field & INTR_INFO_VALID_MASK) {
		if (is_external_interrupt(idtv_info_field))
			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
		else
			printk(KERN_DEBUG "pending exception: not handled yet\n");
	}
	return -1;
}

static __init int cpu_has_kvm_support(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}

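/*
 * Enable VMX on this cpu: set (and lock) the feature-control MSR if
 * the BIOS left it unlocked, set CR4.VMXE, then execute VMXON with
 * this cpu's vmxarea as the VMXON region.
 */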
static void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

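/*
 * Negotiate a VMX control word against its capability MSR: the MSR's
 * low word gives the bits that must be 1, the high word the bits that
 * may be 1.  Required (ctl_min) bits the hardware cannot provide are
 * an error; unsupported optional (ctl_opt) bits are silently dropped.
 */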
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING;
	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min = 0;
		opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		      SECONDARY_EXEC_WBINVD_EXITING |
		      SECONDARY_EXEC_ENABLE_VPID;
		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
	      SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}

static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->arch.rmode.active = 0;

	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

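/*
 * Real-mode emulation needs a TSS.  If userspace has not supplied one
 * (kvm->arch.tss_addr), fall back to the last three pages of the
 * guest's first memory slot.
 */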
static gva_t rmode_tss_base(struct kvm *kvm)
{
	if (!kvm->arch.tss_addr) {
		gfn_t base_gfn = kvm->memslots[0].base_gfn +
				 kvm->memslots[0].npages - 3;
		return base_gfn << PAGE_SHIFT;
	}
	return kvm->arch.tss_addr;
}

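/*
 * Mangle a protected-mode segment into something vm86 can run: stash
 * the current register state in 'save', then derive the selector from
 * the base (selector = base >> 4, as in real-mode segmentation) with a
 * 64KB limit and a 16-bit data access-rights byte.
 */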
1191static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
1192{
1193 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1194
1195 save->selector = vmcs_read16(sf->selector);
1196 save->base = vmcs_readl(sf->base);
1197 save->limit = vmcs_read32(sf->limit);
1198 save->ar = vmcs_read32(sf->ar_bytes);
Jan Kiszka15b00f32007-11-19 10:21:45 +01001199 vmcs_write16(sf->selector, save->base >> 4);
1200 vmcs_write32(sf->base, save->base & 0xfffff);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001201 vmcs_write32(sf->limit, 0xffff);
1202 vmcs_write32(sf->ar_bytes, 0xf3);
1203}
1204
1205static void enter_rmode(struct kvm_vcpu *vcpu)
1206{
1207 unsigned long flags;
1208
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001209 vcpu->arch.rmode.active = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001210
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001211 vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001212 vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
1213
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001214 vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001215 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
1216
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001217 vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001218 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1219
1220 flags = vmcs_readl(GUEST_RFLAGS);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001221 vcpu->arch.rmode.save_iopl
1222 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001223
Glauber de Oliveira Costa053de042008-01-30 13:31:27 +01001224 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001225
1226 vmcs_writel(GUEST_RFLAGS, flags);
Rusty Russell66aee912007-07-17 23:34:16 +10001227 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001228 update_exception_bitmap(vcpu);
1229
1230 vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
1231 vmcs_write32(GUEST_SS_LIMIT, 0xffff);
1232 vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
1233
1234 vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
Michael Riepeabacf8d2006-12-22 01:05:45 -08001235 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
Avi Kivity8cb5b032007-03-20 18:40:40 +02001236 if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
1237 vmcs_writel(GUEST_CS_BASE, 0xf0000);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001238 vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
1239
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001240 fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
1241 fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
1242 fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
1243 fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
Avi Kivity75880a02007-06-20 11:20:04 +03001244
Eddie Dong8668a3c2007-10-10 14:26:45 +08001245 kvm_mmu_reset_context(vcpu);
Avi Kivity75880a02007-06-20 11:20:04 +03001246 init_rmode_tss(vcpu->kvm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001247}
1248
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001249#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001250
1251static void enter_lmode(struct kvm_vcpu *vcpu)
1252{
1253 u32 guest_tr_ar;
1254
1255 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
1256 if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
1257 printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
1258 __FUNCTION__);
1259 vmcs_write32(GUEST_TR_AR_BYTES,
1260 (guest_tr_ar & ~AR_TYPE_MASK)
1261 | AR_TYPE_BUSY_64_TSS);
1262 }
1263
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001264 vcpu->arch.shadow_efer |= EFER_LMA;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001265
Rusty Russell8b9cf982007-07-30 16:31:43 +10001266 find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001267 vmcs_write32(VM_ENTRY_CONTROLS,
1268 vmcs_read32(VM_ENTRY_CONTROLS)
Li, Xin B1e4e6e02007-08-01 21:49:10 +03001269 | VM_ENTRY_IA32E_MODE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001270}
1271
1272static void exit_lmode(struct kvm_vcpu *vcpu)
1273{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001274 vcpu->arch.shadow_efer &= ~EFER_LMA;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001275
1276 vmcs_write32(VM_ENTRY_CONTROLS,
1277 vmcs_read32(VM_ENTRY_CONTROLS)
Li, Xin B1e4e6e02007-08-01 21:49:10 +03001278 & ~VM_ENTRY_IA32E_MODE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001279}
1280
1281#endif
1282
Sheng Yang2384d2b2008-01-17 15:14:33 +08001283static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1284{
1285 vpid_sync_vcpu_all(to_vmx(vcpu));
1286}
1287
Anthony Liguori25c4c272007-04-27 09:29:21 +03001288static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08001289{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001290 vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
1291 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
Avi Kivity399badf2007-01-05 16:36:38 -08001292}
1293
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        vmx_fpu_deactivate(vcpu);

        if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
                enter_pmode(vcpu);

        if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
                enter_rmode(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->arch.shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
                        enter_lmode(vcpu);
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
                        exit_lmode(vcpu);
        }
#endif

        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0,
                    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
        vcpu->arch.cr0 = cr0;

        if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
                vmx_fpu_activate(vcpu);
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        vmx_flush_tlb(vcpu);
        vmcs_writel(GUEST_CR3, cr3);
        if (vcpu->arch.cr0 & X86_CR0_PE)
                vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
        vcpu->arch.cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

        vcpu->arch.shadow_efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) |
                             VM_ENTRY_IA32E_MODE);
                msr->data = efer;

        } else {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) &
                             ~VM_ENTRY_IA32E_MODE);

                msr->data = efer & ~EFER_LME;
        }
        setup_msrs(vmx);
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        var->base = vmcs_readl(sf->base);
        var->limit = vmcs_read32(sf->limit);
        var->selector = vmcs_read16(sf->selector);
        ar = vmcs_read32(sf->ar_bytes);
        if (ar & AR_UNUSABLE_MASK)
                ar = 0;
        var->type = ar & 15;
        var->s = (ar >> 4) & 1;
        var->dpl = (ar >> 5) & 3;
        var->present = (ar >> 7) & 1;
        var->avl = (ar >> 12) & 1;
        var->l = (ar >> 13) & 1;
        var->db = (ar >> 14) & 1;
        var->g = (ar >> 15) & 1;
        var->unusable = (ar >> 16) & 1;
}

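/*
 * The VMCS access-rights field mirrors the layout used below: bits 3:0
 * segment type, bit 4 S (code/data), bits 6:5 DPL, bit 7 present,
 * bit 12 AVL, bit 13 L (64-bit code), bit 14 D/B, bit 15 granularity,
 * bit 16 "segment unusable".  A value of 1 << 16 therefore encodes an
 * unusable (null) segment.
 */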
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
        u32 ar;

        if (var->unusable)
                ar = 1 << 16;
        else {
                ar = var->type & 15;
                ar |= (var->s & 1) << 4;
                ar |= (var->dpl & 3) << 5;
                ar |= (var->present & 1) << 7;
                ar |= (var->avl & 1) << 12;
                ar |= (var->l & 1) << 13;
                ar |= (var->db & 1) << 14;
                ar |= (var->g & 1) << 15;
        }
        if (ar == 0) /* a 0 value means unusable */
                ar = AR_UNUSABLE_MASK;

        return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
                vcpu->arch.rmode.tr.selector = var->selector;
                vcpu->arch.rmode.tr.base = var->base;
                vcpu->arch.rmode.tr.limit = var->limit;
                vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
                return;
        }
        vmcs_writel(sf->base, var->base);
        vmcs_write32(sf->limit, var->limit);
        vmcs_write16(sf->selector, var->selector);
        if (vcpu->arch.rmode.active && var->s) {
                /*
                 * Hack real-mode segments into vm86 compatibility.
                 */
                if (var->base == 0xffff0000 && var->selector == 0xf000)
                        vmcs_writel(sf->base, 0xf0000);
                ar = 0xf3;
        } else
                ar = vmx_segment_access_rights(var);
        vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

        *db = (ar >> 14) & 1;
        *l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

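/*
 * Lay out the fake TSS used while the guest runs in vm86-emulated real
 * mode: point the I/O map base (TSS offset 0x66) past the interrupt
 * redirection map, zero the three pages, and terminate the I/O bitmap
 * with an all-ones byte as the architecture requires.
 */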
static int init_rmode_tss(struct kvm *kvm)
{
        gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        u16 data = 0;
        int ret = 0;
        int r;

        down_read(&kvm->slots_lock);
        r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
        if (r < 0)
                goto out;
        data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
        r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
        if (r < 0)
                goto out;
        r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
        if (r < 0)
                goto out;
        r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
        if (r < 0)
                goto out;
        data = ~0;
        r = kvm_write_guest_page(kvm, fn, &data,
                                 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
                                 sizeof(u8));
        if (r < 0)
                goto out;

        ret = 1;
out:
        up_read(&kvm->slots_lock);
        return ret;
}

static void seg_setup(int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        vmcs_write16(sf->selector, 0);
        vmcs_writel(sf->base, 0);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0x93);
}

static int alloc_apic_access_page(struct kvm *kvm)
{
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;

        down_write(&kvm->slots_lock);
        if (kvm->arch.apic_access_page)
                goto out;
        kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
        kvm_userspace_mem.flags = 0;
        kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
        r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
        if (r)
                goto out;

        down_read(&current->mm->mmap_sem);
        kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
        up_read(&current->mm->mmap_sem);
out:
        up_write(&kvm->slots_lock);
        return r;
}

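/*
 * VPID 0 is reserved: it tags the host's own translations, and a vcpu
 * left with vpid == 0 simply runs with the VPID execution control
 * disabled.  Allocation is first-fit from a global bitmap, so VPIDs are
 * shared system-wide and can run out; that is harmless, only slower.
 */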
static void allocate_vpid(struct vcpu_vmx *vmx)
{
        int vpid;

        vmx->vpid = 0;
        if (!enable_vpid || !cpu_has_vmx_vpid())
                return;
        spin_lock(&vmx_vpid_lock);
        vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
        if (vpid < VMX_NR_VPIDS) {
                vmx->vpid = vpid;
                __set_bit(vpid, vmx_vpid_bitmap);
        }
        spin_unlock(&vmx_vpid_lock);
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
        u32 host_sysenter_cs;
        u32 junk;
        unsigned long a;
        struct descriptor_table dt;
        int i;
        unsigned long kvm_vmx_return;
        u32 exec_control;

        /* I/O */
        vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
        vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));

        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

        /* Control */
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
                     vmcs_config.pin_based_exec_ctrl);

        exec_control = vmcs_config.cpu_based_exec_ctrl;
        if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
                exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
                exec_control |= CPU_BASED_CR8_STORE_EXITING |
                                CPU_BASED_CR8_LOAD_EXITING;
#endif
        }
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

        if (cpu_has_secondary_exec_ctrls()) {
                exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
                if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
                        exec_control &=
                                ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
                if (vmx->vpid == 0)
                        exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
        }

        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
        vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */

        vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
        vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
        vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */

        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
        vmcs_write16(HOST_FS_SELECTOR, read_fs());   /* 22.2.4 */
        vmcs_write16(HOST_GS_SELECTOR, read_gs());   /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
        vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
        rdmsrl(MSR_GS_BASE, a);
        vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
        vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
        vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */

        get_idt(&dt);
        vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */

        asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

        rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
        rdmsrl(MSR_IA32_SYSENTER_ESP, a);
        vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */

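        /*
         * Probe which of the switched MSRs actually exist on this host:
         * anything rdmsr/wrmsr reject is skipped, and the host value
         * doubles as the initial guest value for the rest.
         */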
        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
                u64 data;
                int j = vmx->nmsrs;

                if (rdmsr_safe(index, &data_low, &data_high) < 0)
                        continue;
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
                data = data_low | ((u64)data_high << 32);
                vmx->host_msrs[j].index = index;
                vmx->host_msrs[j].reserved = 0;
                vmx->host_msrs[j].data = data;
                vmx->guest_msrs[j] = vmx->host_msrs[j];
                ++vmx->nmsrs;
        }

        vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

        /* 22.2.1, 20.8.1 */
        vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

        return 0;
}

static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 msr;
        int ret;

        if (!init_rmode_tss(vmx->vcpu.kvm)) {
                ret = -ENOMEM;
                goto out;
        }

        vmx->vcpu.arch.rmode.active = 0;

        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        set_cr8(&vmx->vcpu, 0);
        msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (vmx->vcpu.vcpu_id == 0)
                msr |= MSR_IA32_APICBASE_BSP;
        kvm_set_apic_base(&vmx->vcpu, msr);

        fx_init(&vmx->vcpu);

        /*
         * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
         * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
         */
        if (vmx->vcpu.vcpu_id == 0) {
                vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
                vmcs_writel(GUEST_CS_BASE, 0x000f0000);
        } else {
                vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
                vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
        }
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

        seg_setup(VCPU_SREG_DS);
        seg_setup(VCPU_SREG_ES);
        seg_setup(VCPU_SREG_FS);
        seg_setup(VCPU_SREG_GS);
        seg_setup(VCPU_SREG_SS);

        vmcs_write16(GUEST_TR_SELECTOR, 0);
        vmcs_writel(GUEST_TR_BASE, 0);
        vmcs_write32(GUEST_TR_LIMIT, 0xffff);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        vmcs_write16(GUEST_LDTR_SELECTOR, 0);
        vmcs_writel(GUEST_LDTR_BASE, 0);
        vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
        vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

        vmcs_write32(GUEST_SYSENTER_CS, 0);
        vmcs_writel(GUEST_SYSENTER_ESP, 0);
        vmcs_writel(GUEST_SYSENTER_EIP, 0);

        vmcs_writel(GUEST_RFLAGS, 0x02);
        if (vmx->vcpu.vcpu_id == 0)
                vmcs_writel(GUEST_RIP, 0xfff0);
        else
                vmcs_writel(GUEST_RIP, 0);
        vmcs_writel(GUEST_RSP, 0);

        /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
        vmcs_writel(GUEST_DR7, 0x400);

        vmcs_writel(GUEST_GDTR_BASE, 0);
        vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

        vmcs_writel(GUEST_IDTR_BASE, 0);
        vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

        vmcs_write32(GUEST_ACTIVITY_STATE, 0);
        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

        guest_write_tsc(0);

        /* Special registers */
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

        setup_msrs(vmx);

        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */

        if (cpu_has_vmx_tpr_shadow()) {
                vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
                if (vm_need_tpr_shadow(vmx->vcpu.kvm))
                        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
                                     page_to_phys(vmx->vcpu.arch.apic->regs_page));
                vmcs_write32(TPR_THRESHOLD, 0);
        }

        if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
                vmcs_write64(APIC_ACCESS_ADDR,
                             page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));

        if (vmx->vpid != 0)
                vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);

        vmx->vcpu.arch.cr0 = 0x60000010;
        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
        vmx_set_cr4(&vmx->vcpu, 0);
#ifdef CONFIG_X86_64
        vmx_set_efer(&vmx->vcpu, 0);
#endif
        vmx_fpu_activate(&vmx->vcpu);
        update_exception_bitmap(&vmx->vcpu);

        vpid_sync_vcpu_all(vmx);

        return 0;

out:
        return ret;
}

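/*
 * In real mode the CPU would fetch the vector through the vm86 interrupt
 * redirection map, which VMX cannot virtualize, so the interrupt is
 * instead injected as a length-1 soft interrupt with RIP backed up by
 * one; if the entry fails to deliver it, fixup_rmode_irq() below restores
 * RIP and reconstructs the pending-interrupt state by hand.
 */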
static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (vcpu->arch.rmode.active) {
                vmx->rmode.irq.pending = true;
                vmx->rmode.irq.vector = irq;
                vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                             irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
                vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
        int word_index = __ffs(vcpu->arch.irq_summary);
        int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
        if (!vcpu->arch.irq_pending[word_index])
                clear_bit(word_index, &vcpu->arch.irq_summary);
        vmx_inject_irq(vcpu, irq);
}

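/*
 * An interrupt can only be injected when RFLAGS.IF is set and the guest
 * is not in an sti/mov-ss interrupt shadow.  If one is pending but the
 * window is closed, arm the "interrupt window exiting" control so the
 * guest exits as soon as it becomes injectable.
 */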
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                  struct kvm_run *kvm_run)
{
        u32 cpu_based_vm_exec_control;

        vcpu->arch.interrupt_window_open =
                ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

        if (vcpu->arch.interrupt_window_open &&
            vcpu->arch.irq_summary &&
            !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                /*
                 * Interrupts are enabled and not blocked by sti or
                 * mov ss: inject now.
                 */
                kvm_do_inject_irq(vcpu);

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        if (!vcpu->arch.interrupt_window_open &&
            (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
                /*
                 * Interrupts blocked.  Wait for unblock.
                 */
                cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        else
                cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
        int ret;
        struct kvm_userspace_memory_region tss_mem = {
                .slot = 8,
                .guest_phys_addr = addr,
                .memory_size = PAGE_SIZE * 3,
                .flags = 0,
        };

        ret = kvm_set_memory_region(kvm, &tss_mem, 0);
        if (ret)
                return ret;
        kvm->arch.tss_addr = addr;
        return 0;
}

static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
        struct kvm_guest_debug *dbg = &vcpu->guest_debug;

        set_debugreg(dbg->bp[0], 0);
        set_debugreg(dbg->bp[1], 1);
        set_debugreg(dbg->bp[2], 2);
        set_debugreg(dbg->bp[3], 3);

        if (dbg->singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
                vmcs_writel(GUEST_RFLAGS, flags);
        }
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                                  int vec, u32 err_code)
{
        if (!vcpu->arch.rmode.active)
                return 0;

        /*
         * An instruction with the address-size override prefix (opcode
         * 0x67) causes a #SS fault with error code 0 in VM86 mode.
         */
        if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
                if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
                        return 1;
        return 0;
}

static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info, error_code;
        unsigned long cr2, rip;
        u32 vect_info;
        enum emulation_result er;

        vect_info = vmx->idt_vectoring_info;
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
            !is_page_fault(intr_info))
                printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
                       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);

        if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
                int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
                set_bit(irq, vcpu->arch.irq_pending);
                set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
        }

        if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
                return 1;  /* already handled by vmx_vcpu_run() */

        if (is_no_device(intr_info)) {
                vmx_fpu_activate(vcpu);
                return 1;
        }

        if (is_invalid_opcode(intr_info)) {
                er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
                if (er != EMULATE_DONE)
                        kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        error_code = 0;
        rip = vmcs_readl(GUEST_RIP);
        if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
        if (is_page_fault(intr_info)) {
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
                return kvm_mmu_page_fault(vcpu, cr2, error_code);
        }

        if (vcpu->arch.rmode.active &&
            handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
                                   error_code)) {
                if (vcpu->arch.halt_request) {
                        vcpu->arch.halt_request = 0;
                        return kvm_emulate_halt(vcpu);
                }
                return 1;
        }

        if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
            (INTR_TYPE_EXCEPTION | 1)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                return 0;
        }
        kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
        kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
        kvm_run->ex.error_code = error_code;
        return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                                     struct kvm_run *kvm_run)
{
        ++vcpu->stat.irq_exits;
        return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

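/*
 * Exit qualification for an I/O instruction encodes everything needed to
 * emulate it: bits 2:0 hold size-1, bit 3 the direction (1 = in), bit 4
 * the string flag (ins/outs), bit 5 the rep prefix, and bits 31:16 the
 * port number.  String operations go through the full emulator.
 */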
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        unsigned long exit_qualification;
        int size, down, in, string, rep;
        unsigned port;

        ++vcpu->stat.io_exits;
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        string = (exit_qualification & 16) != 0;

        if (string) {
                if (emulate_instruction(vcpu,
                                        kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }

        size = (exit_qualification & 7) + 1;
        in = (exit_qualification & 8) != 0;
        down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
        rep = (exit_qualification & 32) != 0;
        port = exit_qualification >> 16;

        return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
}

static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
        /*
         * Patch in the VMCALL instruction:
         */
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xc1;
}

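/*
 * CR accesses the guest cannot be allowed to perform directly trap here.
 * Exit qualification: bits 3:0 give the control register, bits 5:4 the
 * access type (0 mov-to-cr, 1 mov-from-cr, 2 clts, 3 lmsw) and bits 11:8
 * the general-purpose register involved.
 */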
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        unsigned long exit_qualification;
        int cr;
        int reg;

        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        cr = exit_qualification & 15;
        reg = (exit_qualification >> 8) & 15;
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
                switch (cr) {
                case 0:
                        vcpu_load_rsp_rip(vcpu);
                        set_cr0(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 3:
                        vcpu_load_rsp_rip(vcpu);
                        set_cr3(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 4:
                        vcpu_load_rsp_rip(vcpu);
                        set_cr4(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 8:
                        vcpu_load_rsp_rip(vcpu);
                        set_cr8(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        if (irqchip_in_kernel(vcpu->kvm))
                                return 1;
                        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
                        return 0;
                }
                break;
        case 2: /* clts */
                vcpu_load_rsp_rip(vcpu);
                vmx_fpu_deactivate(vcpu);
                vcpu->arch.cr0 &= ~X86_CR0_TS;
                vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
                vmx_fpu_activate(vcpu);
                skip_emulated_instruction(vcpu);
                return 1;
        case 1: /* mov from cr */
                switch (cr) {
                case 3:
                        vcpu_load_rsp_rip(vcpu);
                        vcpu->arch.regs[reg] = vcpu->arch.cr3;
                        vcpu_put_rsp_rip(vcpu);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 8:
                        vcpu_load_rsp_rip(vcpu);
                        vcpu->arch.regs[reg] = get_cr8(vcpu);
                        vcpu_put_rsp_rip(vcpu);
                        skip_emulated_instruction(vcpu);
                        return 1;
                }
                break;
        case 3: /* lmsw */
                lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);

                skip_emulated_instruction(vcpu);
                return 1;
        default:
                break;
        }
        kvm_run->exit_reason = 0;
        pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
                  (int)(exit_qualification >> 4) & 3, cr);
        return 0;
}

static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        unsigned long exit_qualification;
        unsigned long val;
        int dr, reg;

        /*
         * FIXME: this code assumes the host is debugging the guest.
         *        need to deal with guest debugging itself too.
         */
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        dr = exit_qualification & 7;
        reg = (exit_qualification >> 8) & 15;
        vcpu_load_rsp_rip(vcpu);
        if (exit_qualification & 16) {
                /* mov from dr */
                switch (dr) {
                case 6:
                        val = 0xffff0ff0;
                        break;
                case 7:
                        val = 0x400;
                        break;
                default:
                        val = 0;
                }
                vcpu->arch.regs[reg] = val;
        } else {
                /* mov to dr */
        }
        vcpu_put_rsp_rip(vcpu);
        skip_emulated_instruction(vcpu);
        return 1;
}

static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_emulate_cpuid(vcpu);
        return 1;
}

static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
        u64 data;

        if (vmx_get_msr(vcpu, ecx, &data)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        /* FIXME: handling of bits 32:63 of rax, rdx */
        vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
        vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
        skip_emulated_instruction(vcpu);
        return 1;
}

static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
        u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
                | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);

        if (vmx_set_msr(vcpu, ecx, data) != 0) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        skip_emulated_instruction(vcpu);
        return 1;
}

static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
                                      struct kvm_run *kvm_run)
{
        return 1;
}

static int handle_interrupt_window(struct kvm_vcpu *vcpu,
                                   struct kvm_run *kvm_run)
{
        u32 cpu_based_vm_exec_control;

        /* clear pending irq */
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
        /*
         * If userspace is waiting to inject interrupts, exit as soon as
         * possible.
         */
        if (kvm_run->request_interrupt_window &&
            !vcpu->arch.irq_summary) {
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                ++vcpu->stat.irq_window_exits;
                return 0;
        }
        return 1;
}

static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        skip_emulated_instruction(vcpu);
        return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        skip_emulated_instruction(vcpu);
        kvm_emulate_hypercall(vcpu);
        return 1;
}

static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        skip_emulated_instruction(vcpu);
        /* TODO: Add support for VT-d/pass-through device */
        return 1;
}

static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        u64 exit_qualification;
        enum emulation_result er;
        unsigned long offset;

        exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
        offset = exit_qualification & 0xffful;

        er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);

        if (er != EMULATE_DONE) {
                printk(KERN_ERR
                       "Failed to handle APIC access vmexit! Offset is 0x%lx\n",
                       offset);
                return -ENOTSUPP;
        }
        return 1;
}

/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
                                      struct kvm_run *kvm_run) = {
        [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
        [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
        [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
        [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
        [EXIT_REASON_CR_ACCESS]               = handle_cr,
        [EXIT_REASON_DR_ACCESS]               = handle_dr,
        [EXIT_REASON_CPUID]                   = handle_cpuid,
        [EXIT_REASON_MSR_READ]                = handle_rdmsr,
        [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
        [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
        [EXIT_REASON_HLT]                     = handle_halt,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
        [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
        [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
        [EXIT_REASON_WBINVD]                  = handle_wbinvd,
};

static const int kvm_vmx_max_exit_handlers =
        ARRAY_SIZE(kvm_vmx_exit_handlers);

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 vectoring_info = vmx->idt_vectoring_info;

        if (unlikely(vmx->fail)) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = vmcs_read32(VM_INSTRUCTION_ERROR);
                return 0;
        }

        if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
            exit_reason != EXIT_REASON_EXCEPTION_NMI)
                printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
                       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
        else {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = exit_reason;
        }
        return 0;
}

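/*
 * With a TPR shadow the threshold below makes the CPU take a
 * TPR_BELOW_THRESHOLD exit whenever the guest lowers its task priority
 * beneath the priority class of the highest pending interrupt; both the
 * threshold field and the comparison work on the 4-bit priority class
 * (the upper nibble of the TPR, i.e. CR8).
 */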
static void update_tpr_threshold(struct kvm_vcpu *vcpu)
{
        int max_irr, tpr;

        if (!vm_need_tpr_shadow(vcpu->kvm))
                return;

        if (!kvm_lapic_enabled(vcpu) ||
            ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
                vmcs_write32(TPR_THRESHOLD, 0);
                return;
        }

        tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
        vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
        u32 cpu_based_vm_exec_control;

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void vmx_intr_assist(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 idtv_info_field, intr_info_field;
        int has_ext_irq, interrupt_window_open;
        int vector;

        update_tpr_threshold(vcpu);

        has_ext_irq = kvm_cpu_has_interrupt(vcpu);
        intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
        idtv_info_field = vmx->idt_vectoring_info;
        if (intr_info_field & INTR_INFO_VALID_MASK) {
                if (idtv_info_field & INTR_INFO_VALID_MASK) {
                        /* TODO: fault when IDT_Vectoring */
                        if (printk_ratelimit())
                                printk(KERN_ERR "Fault when IDT_Vectoring\n");
                }
                if (has_ext_irq)
                        enable_irq_window(vcpu);
                return;
        }
        if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
                if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
                    == INTR_TYPE_EXT_INTR
                    && vcpu->arch.rmode.active) {
                        u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;

                        vmx_inject_irq(vcpu, vect);
                        if (unlikely(has_ext_irq))
                                enable_irq_window(vcpu);
                        return;
                }

                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
                             vmcs_read32(VM_EXIT_INSTRUCTION_LEN));

                if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
                        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
                                     vmcs_read32(IDT_VECTORING_ERROR_CODE));
                if (unlikely(has_ext_irq))
                        enable_irq_window(vcpu);
                return;
        }
        if (!has_ext_irq)
                return;
        interrupt_window_open =
                ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
        if (interrupt_window_open) {
                vector = kvm_cpu_get_interrupt(vcpu);
                vmx_inject_irq(vcpu, vector);
                kvm_timer_intr_post(vcpu, vector);
        } else
                enable_irq_window(vcpu);
}

/*
 * Failure to inject an interrupt should give us the information
 * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
 * when fetching the interrupt redirection bitmap in the real-mode
 * tss, this doesn't happen.  So we do it ourselves.
 */
static void fixup_rmode_irq(struct vcpu_vmx *vmx)
{
        vmx->rmode.irq.pending = 0;
        if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
                return;
        vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
        if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
                vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
                vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
                return;
        }
        vmx->idt_vectoring_info =
                VECTORING_INFO_VALID_MASK
                | INTR_TYPE_EXT_INTR
                | vmx->rmode.irq.vector;
}

static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info;

        /*
         * Loading guest fpu may have cleared host cr0.ts
         */
        vmcs_writel(HOST_CR0, read_cr0());

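        /*
         * The switch into the guest: %0 holds the vcpu_vmx pointer
         * throughout.  HOST_RSP is refreshed via VMWRITE on every run,
         * the guest GPRs that VMX does not context-switch are loaded by
         * hand, and the "launched" flag selects VMLAUNCH vs. VMRESUME.
         * On the way back, setbe records a failed VM entry in vmx->fail.
         */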
        asm(
                /* Store host registers */
#ifdef CONFIG_X86_64
                "push %%rdx; push %%rbp;"
                "push %%rcx \n\t"
#else
                "push %%edx; push %%ebp;"
                "push %%ecx \n\t"
#endif
                ASM_VMX_VMWRITE_RSP_RDX "\n\t"
                /* Check if vmlaunch or vmresume is needed */
                "cmpl $0, %c[launched](%0) \n\t"
                /* Load guest registers.  Don't clobber flags. */
#ifdef CONFIG_X86_64
                "mov %c[cr2](%0), %%rax \n\t"
                "mov %%rax, %%cr2 \n\t"
                "mov %c[rax](%0), %%rax \n\t"
                "mov %c[rbx](%0), %%rbx \n\t"
                "mov %c[rdx](%0), %%rdx \n\t"
                "mov %c[rsi](%0), %%rsi \n\t"
                "mov %c[rdi](%0), %%rdi \n\t"
                "mov %c[rbp](%0), %%rbp \n\t"
                "mov %c[r8](%0),  %%r8  \n\t"
                "mov %c[r9](%0),  %%r9  \n\t"
                "mov %c[r10](%0), %%r10 \n\t"
                "mov %c[r11](%0), %%r11 \n\t"
                "mov %c[r12](%0), %%r12 \n\t"
                "mov %c[r13](%0), %%r13 \n\t"
                "mov %c[r14](%0), %%r14 \n\t"
                "mov %c[r15](%0), %%r15 \n\t"
                "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */
#else
                "mov %c[cr2](%0), %%eax \n\t"
                "mov %%eax, %%cr2 \n\t"
                "mov %c[rax](%0), %%eax \n\t"
                "mov %c[rbx](%0), %%ebx \n\t"
                "mov %c[rdx](%0), %%edx \n\t"
                "mov %c[rsi](%0), %%esi \n\t"
                "mov %c[rdi](%0), %%edi \n\t"
                "mov %c[rbp](%0), %%ebp \n\t"
                "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */
#endif
                /* Enter guest mode */
                "jne .Llaunched \n\t"
                ASM_VMX_VMLAUNCH "\n\t"
                "jmp .Lkvm_vmx_return \n\t"
                ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
                ".Lkvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
                "xchg %0, (%%rsp) \n\t"
                "mov %%rax, %c[rax](%0) \n\t"
                "mov %%rbx, %c[rbx](%0) \n\t"
                "pushq (%%rsp); popq %c[rcx](%0) \n\t"
                "mov %%rdx, %c[rdx](%0) \n\t"
                "mov %%rsi, %c[rsi](%0) \n\t"
                "mov %%rdi, %c[rdi](%0) \n\t"
                "mov %%rbp, %c[rbp](%0) \n\t"
                "mov %%r8,  %c[r8](%0) \n\t"
                "mov %%r9,  %c[r9](%0) \n\t"
                "mov %%r10, %c[r10](%0) \n\t"
                "mov %%r11, %c[r11](%0) \n\t"
                "mov %%r12, %c[r12](%0) \n\t"
                "mov %%r13, %c[r13](%0) \n\t"
                "mov %%r14, %c[r14](%0) \n\t"
                "mov %%r15, %c[r15](%0) \n\t"
                "mov %%cr2, %%rax \n\t"
                "mov %%rax, %c[cr2](%0) \n\t"

                "pop %%rbp; pop %%rbp; pop %%rdx \n\t"
#else
                "xchg %0, (%%esp) \n\t"
                "mov %%eax, %c[rax](%0) \n\t"
                "mov %%ebx, %c[rbx](%0) \n\t"
                "pushl (%%esp); popl %c[rcx](%0) \n\t"
                "mov %%edx, %c[rdx](%0) \n\t"
                "mov %%esi, %c[rsi](%0) \n\t"
                "mov %%edi, %c[rdi](%0) \n\t"
                "mov %%ebp, %c[rbp](%0) \n\t"
                "mov %%cr2, %%eax \n\t"
                "mov %%eax, %c[cr2](%0) \n\t"

                "pop %%ebp; pop %%ebp; pop %%edx \n\t"
#endif
                "setbe %c[fail](%0) \n\t"
              : : "c"(vmx), "d"((unsigned long)HOST_RSP),
                [launched]"i"(offsetof(struct vcpu_vmx, launched)),
                [fail]"i"(offsetof(struct vcpu_vmx, fail)),
                [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
                [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
                [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
                [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
                [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
                [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
                [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
                [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
                [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
                [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
                [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
                [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
                [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
                [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
                [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
                [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
              : "cc", "memory"
#ifdef CONFIG_X86_64
                , "rbx", "rdi", "rsi"
                , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#else
                , "ebx", "edi", "esi"
#endif
              );

        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (vmx->rmode.irq.pending)
                fixup_rmode_irq(vmx);

        vcpu->arch.interrupt_window_open =
                (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;

        asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->launched = 1;

        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

        /* We need to handle NMIs before interrupts are enabled */
        if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
                asm("int $2");
}

Avi Kivity6aa8b732006-12-10 02:21:36 -08002543static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2544{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002545 struct vcpu_vmx *vmx = to_vmx(vcpu);
2546
2547 if (vmx->vmcs) {
Rusty Russell8b9cf982007-07-30 16:31:43 +10002548 on_each_cpu(__vcpu_clear, vmx, 0, 1);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002549 free_vmcs(vmx->vmcs);
2550 vmx->vmcs = NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002551 }
2552}
2553
2554static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2555{
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002556 struct vcpu_vmx *vmx = to_vmx(vcpu);
2557
Sheng Yang2384d2b2008-01-17 15:14:33 +08002558 spin_lock(&vmx_vpid_lock);
2559 if (vmx->vpid != 0)
2560 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
2561 spin_unlock(&vmx_vpid_lock);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002562 vmx_free_vmcs(vcpu);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002563 kfree(vmx->host_msrs);
2564 kfree(vmx->guest_msrs);
2565 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10002566 kmem_cache_free(kvm_vcpu_cache, vmx);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002567}
2568
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002569static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002570{
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002571 int err;
Rusty Russellc16f8622007-07-30 21:12:19 +10002572 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Avi Kivity15ad7142007-07-11 18:17:21 +03002573 int cpu;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002574
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002575 if (!vmx)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002576 return ERR_PTR(-ENOMEM);
2577
Sheng Yang2384d2b2008-01-17 15:14:33 +08002578 allocate_vpid(vmx);
2579
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002580 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
2581 if (err)
2582 goto free_vcpu;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002583
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002584 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002585 if (!vmx->guest_msrs) {
2586 err = -ENOMEM;
2587 goto uninit_vcpu;
2588 }
Ingo Molnar965b58a2007-01-05 16:36:23 -08002589
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002590 vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2591 if (!vmx->host_msrs)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002592 goto free_guest_msrs;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002593
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002594 vmx->vmcs = alloc_vmcs();
2595 if (!vmx->vmcs)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002596 goto free_msrs;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002597
2598 vmcs_clear(vmx->vmcs);
2599
Avi Kivity15ad7142007-07-11 18:17:21 +03002600 cpu = get_cpu();
2601 vmx_vcpu_load(&vmx->vcpu, cpu);
Rusty Russell8b9cf982007-07-30 16:31:43 +10002602 err = vmx_vcpu_setup(vmx);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002603 vmx_vcpu_put(&vmx->vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03002604 put_cpu();
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002605 if (err)
2606 goto free_vmcs;
Marcelo Tosatti5e4a0b32008-02-14 21:21:43 -02002607 if (vm_need_virtualize_apic_accesses(kvm))
2608 if (alloc_apic_access_page(kvm) != 0)
2609 goto free_vmcs;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002610
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002611 return &vmx->vcpu;
Ingo Molnar965b58a2007-01-05 16:36:23 -08002612
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002613free_vmcs:
2614 free_vmcs(vmx->vmcs);
2615free_msrs:
2616 kfree(vmx->host_msrs);
2617free_guest_msrs:
2618 kfree(vmx->guest_msrs);
2619uninit_vcpu:
2620 kvm_vcpu_uninit(&vmx->vcpu);
2621free_vcpu:
Rusty Russella4770342007-08-01 14:46:11 +10002622 kmem_cache_free(kvm_vcpu_cache, vmx);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002623 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002624}
2625
static void __init vmx_check_processor_compat(void *rtn)
{
	struct vmcs_config vmcs_conf;

	*(int *)rtn = 0;
	if (setup_vmcs_config(&vmcs_conf) < 0)
		*(int *)rtn = -EIO;
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
		       smp_processor_id());
		*(int *)rtn = -EIO;
	}
}

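/*
 * The callback table registered with the arch-neutral KVM core by
 * vmx_init() below.  All hardware-specific behaviour is reached through
 * these hooks; the core, for instance, creates a vcpu roughly as
 *
 *	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
 *
 * and never names the vmx_* functions directly.
 */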
static struct kvm_x86_ops vmx_x86_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = cpu_has_vmx_virtualize_apic_accesses,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_save_host_state,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,
	.vcpu_decache = vmx_vcpu_decache,

	.set_guest_debug = set_guest_debug,
	.guest_debug_pre = kvm_guest_debug_pre,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
#ifdef CONFIG_X86_64
	.set_efer = vmx_set_efer,
#endif
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.cache_regs = vcpu_load_rsp_rip,
	.decache_regs = vcpu_put_rsp_rip,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
	.handle_exit = kvm_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = vmx_patch_hypercall,
	.get_irq = vmx_get_irq,
	.set_irq = vmx_inject_irq,
	.queue_exception = vmx_queue_exception,
	.exception_injected = vmx_exception_injected,
	.inject_pending_irq = vmx_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,

	.set_tss_addr = vmx_set_tss_addr,
};

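/*
 * Module entry point: allocate the I/O permission bitmaps shared by all
 * guests, reserve VPID 0 for the host and hand vmx_x86_ops to the KVM
 * core.
 */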
static int __init vmx_init(void)
{
	void *iova;
	int r;

	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_b) {
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Set all bits so every I/O port access traps, then allow direct
	 * access to the PC debug port (it is often used for I/O delays,
	 * but the vmexits simply slow things down).
	 */
	iova = kmap(vmx_io_bitmap_a);
	memset(iova, 0xff, PAGE_SIZE);
	clear_bit(0x80, iova);
	kunmap(vmx_io_bitmap_a);

	iova = kmap(vmx_io_bitmap_b);
	memset(iova, 0xff, PAGE_SIZE);
	kunmap(vmx_io_bitmap_b);

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
	if (r)
		goto out1;

	/*
	 * With bypass_guest_pf the MMU marks non-present shadow ptes with
	 * a distinguishable value, so page faults the guest must handle
	 * itself can be delivered to it without a vmexit.
	 */
	if (bypass_guest_pf)
		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);

	return 0;

out1:
	__free_page(vmx_io_bitmap_b);
out:
	__free_page(vmx_io_bitmap_a);
	return r;
}

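/*
 * Module exit: release the I/O bitmap pages allocated in vmx_init() and
 * unregister from the KVM core.
 */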
static void __exit vmx_exit(void)
{
	__free_page(vmx_io_bitmap_b);
	__free_page(vmx_io_bitmap_a);

	kvm_exit();
}

module_init(vmx_init)
module_exit(vmx_exit)