/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "vmx.h"
#include "segment_descriptor.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

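/*
 * Layout of the hardware VMCS region: the first word holds the revision
 * identifier reported by MSR_IA32_VMX_BASIC, the second the VMX-abort
 * indicator; the rest is implementation-specific data that is only ever
 * accessed through VMREAD/VMWRITE.
 */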
struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	int launched;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int nmsrs;
	int save_nmsrs;
	int msr_offset_efer;
#ifdef CONFIG_X86_64
	int msr_offset_kernel_gs_base;
#endif
	struct vmcs *vmcs;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
		int fs_gs_ldt_reload_needed;
	} host_state;
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}

static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
{
	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
}

static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int efer_offset = vmx->msr_offset_efer;

	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
}

static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}

static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	i = __find_msr_index(vcpu, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

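/*
 * VMCLEAR flushes the VMCS data cached by the processor back to memory
 * and marks the VMCS inactive; "setna" captures the CF/ZF failure
 * indication that VMX instructions leave in RFLAGS.
 */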
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
	struct kvm_vcpu *vcpu = arg;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu = raw_smp_processor_id();

	if (vcpu->cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vcpu->host_tsc);
}

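/*
 * A VMCS that is active on a remote cpu must be cleared on that cpu, so
 * kick it with an IPI; clearing locally is fine if the vcpu last ran
 * here (or has never run at all).
 */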
static void vcpu_clear(struct kvm_vcpu *vcpu)
{
	if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
		smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
	else
		__vcpu_clear(vcpu);
	to_vmx(vcpu)->launched = 0;
}

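/*
 * VMCS accessors: all field widths funnel through vmcs_readl()/vmcs_writel(),
 * which wrap VMREAD/VMWRITE; 64-bit fields take two accesses on 32-bit hosts.
 */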
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		      : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	vmcs_writel(field, value);
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

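/*
 * Recompute which guest exceptions must cause a vmexit: page faults are
 * always intercepted (for the shadow MMU), #NM while the guest FPU is
 * lazily unloaded, #DB while the guest is being debugged, and everything
 * while emulating real mode.
 */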
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = 1u << PF_VECTOR;
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
		eb |= 1u << 1;	/* #DB */
	if (vcpu->rmode.active)
		eb = ~0;
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

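/*
 * On vmexit the CPU restores TR but not the TSS limit, so TR must be
 * reloaded.  LTR demands a descriptor marked "available" (it sets the
 * busy bit itself), hence the type fixup before load_TR_desc().
 */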
static void reload_tss(void)
{
#ifndef CONFIG_X86_64

	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct segment_descriptor *descs;

	get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
#endif
}

static void load_transition_efer(struct kvm_vcpu *vcpu)
{
	u64 trans_efer;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int efer_offset = vmx->msr_offset_efer;

	trans_efer = vmx->host_msrs[efer_offset].data;
	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
	wrmsrl(MSR_EFER, trans_efer);
	vcpu->stat.efer_reload++;
}

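/*
 * Save the host segment and MSR state that the hardware does not switch
 * automatically, then load the guest's MSRs.  The host_state.loaded flag
 * keeps this from being repeated until vmx_load_host_state() undoes it.
 */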
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = read_ldt();
	vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = read_fs();
	if (!(vmx->host_state.fs_sel & 7))
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
	else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_gs_ldt_reload_needed = 1;
	}
	vmx->host_state.gs_sel = read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.fs_gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		save_msrs(vmx->host_msrs + vmx->msr_offset_kernel_gs_base, 1);
	}
#endif
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vcpu))
		load_transition_efer(vcpu);
}

static void vmx_load_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_gs_ldt_reload_needed) {
		load_ldt(vmx->host_state.ldt_sel);
		load_fs(vmx->host_state.fs_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);

		reload_tss();
	}
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vcpu))
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta;

	if (vcpu->cpu != cpu)
		vcpu_clear(vcpu);

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);	   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonic.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_load_host_state(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
	vcpu_clear(vcpu);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	vmcs_writel(GUEST_RFLAGS, rflags);
}

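/*
 * Advance the guest's RIP past the instruction just emulated.  Bits 0-1
 * of the interruptibility field are the STI and MOV SS blocking bits,
 * which must not survive into the next instruction.
 */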
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;
	u32 interruptibility;

	rip = vmcs_readl(GUEST_RIP);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
	vcpu->interrupt_window_open = 1;
}

static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
	       vmcs_readl(GUEST_RIP));
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     GP_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;
}

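/*
 * Only the entries compacted to the front of the guest/host MSR arrays
 * (the first save_nmsrs of them) are transferred by load_msrs() and
 * save_msrs() on vcpu load/put; the rest are left alone.
 */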
/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int save_nmsrs;

	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		int index;

		index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		index = __find_msr_index(vcpu, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		index = __find_msr_index(vcpu, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vcpu, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vcpu, MSR_K6_STAR);
		if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
			move_msr_up(vcpu, index, save_nmsrs++);
	}
#endif
	vmx->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
#endif
	vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
}

/*
 * Reads and returns the guest's timestamp counter "register":
 * guest_tsc = host_tsc + tsc_offset	-- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * Writes 'guest_tsc' into the guest's timestamp counter "register":
 * guest_tsc = host_tsc + tsc_offset  ==>  tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
	u64 host_tsc;

	rdtscll(host_tsc);
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct kvm_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		msr = find_msr_entry(vcpu, msr_index);
		if (msr) {
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		if (vmx->host_state.loaded)
			load_transition_efer(vcpu);
		break;
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		guest_write_tsc(data);
		break;
	default:
		msr = find_msr_entry(vcpu, msr_index);
		if (msr) {
			msr->data = data;
			if (vmx->host_state.loaded)
				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
	vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
	vcpu->rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
	vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
	vmcs_writel(GUEST_RIP, vcpu->rip);
}

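/*
 * Translate the user's breakpoint description into a guest DR7 image:
 * bits 0-7 enable the four breakpoints in pairs, bits 16-31 select
 * type/length (00 = execution), and bit 9 (GE) requests exact reporting.
 */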
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	unsigned long dr7 = 0x400;
	int old_singlestep;

	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		int i;

		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint */
		}

		vcpu->guest_debug.singlestep = dbg->singlestep;
	} else
		vcpu->guest_debug.singlestep = 0;

	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);
	}

	update_exception_bitmap(vcpu);
	vmcs_writel(GUEST_DR7, dr7);

	return 0;
}

static __init int cpu_has_kvm_support(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}

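/*
 * Per-cpu VMX setup: make sure the BIOS feature-control MSR both enables
 * VMXON and is locked (writing it again once locked would #GP), set
 * CR4.VMXE, and enter VMX root mode with VMXON on this cpu's vmxarea.
 */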
static void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

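/*
 * Each VMX capability MSR reports, in its low word, control bits that
 * must be 1 and, in its high word, bits that may be 1.  Fold those
 * constraints into the requested (min | opt) set and fail if a required
 * bit is unsupported.
 */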
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -1;

	*result = ctl;
	return 0;
}

static __init int setup_vmcs_config(void)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -1;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -1;

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -1;

	min = opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -1;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -1;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -1;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -1;

	vmcs_config.size = vmx_msr_high & 0x1fff;
	vmcs_config.order = get_order(vmcs_config.size);
	vmcs_config.revision_id = vmx_msr_low;

	vmcs_config.pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_config.cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_config.vmexit_ctrl = _vmexit_control;
	vmcs_config.vmentry_ctrl = _vmentry_control;

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		free_vmcs(per_cpu(vmxarea, cpu));
}

extern struct vmcs *alloc_vmcs_cpu(int cpu);

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config() < 0)
		return -1;
	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}

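/*
 * Leaving emulated real mode: if the segment still matches the state
 * saved when real mode was entered, restore it; otherwise synthesize a
 * flat writable data segment whose DPL is the selector's RPL.
 */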
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->rmode.active = 0;

	vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

static int rmode_tss_base(struct kvm *kvm)
{
	gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
	return base_gfn << PAGE_SHIFT;
}

static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
}

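/*
 * Enter emulated real mode by running the guest in vm86 mode: save the
 * protected-mode segment state, point TR at the private real-mode TSS,
 * set EFLAGS.VM/IOPL and CR4.VME, and squash every data segment into
 * the selector<<4 form vm86 requires.
 */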
static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	vcpu->rmode.active = 1;

	vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

	flags |= IOPL_MASK | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);

	init_rmode_tss(vcpu->kvm);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
		       __FUNCTION__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}

	vcpu->shadow_efer |= EFER_LMA;

	find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     | VM_ENTRY_CONTROLS_IA32E_MASK);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vcpu->shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_CONTROLS_IA32E_MASK);
}

#endif

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

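/*
 * CR0 writes drive the mode transitions: toggling PE switches between
 * emulated real mode and protected mode, and toggling PG while EFER.LME
 * is set enters or leaves long mode.  The read shadow keeps the value
 * the guest believes it wrote.
 */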
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	vmx_fpu_deactivate(vcpu);

	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0,
		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
	vcpu->cr0 = cr0;

	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
		vmx_fpu_activate(vcpu);
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	vmcs_writel(GUEST_CR3, cr3);
	if (vcpu->cr0 & X86_CR0_PE)
		vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
	vcpu->cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);

	vcpu->shadow_efer = efer;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_CONTROLS_IA32E_MASK);
		msr->data = efer;
	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_CONTROLS_IA32E_MASK);
		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vcpu);
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	var->base = vmcs_readl(sf->base);
	var->limit = vmcs_read32(sf->limit);
	var->selector = vmcs_read16(sf->selector);
	ar = vmcs_read32(sf->ar_bytes);
	if (ar & AR_UNUSABLE_MASK)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;
}

static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	if (var->unusable)
		ar = 1 << 16;
	else {
		ar = var->type & 15;
		ar |= (var->s & 1) << 4;
		ar |= (var->dpl & 3) << 5;
		ar |= (var->present & 1) << 7;
		ar |= (var->avl & 1) << 12;
		ar |= (var->l & 1) << 13;
		ar |= (var->db & 1) << 14;
		ar |= (var->g & 1) << 15;
	}
	if (ar == 0) /* a 0 value means unusable */
		ar = AR_UNUSABLE_MASK;

	return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
		vcpu->rmode.tr.selector = var->selector;
		vcpu->rmode.tr.base = var->base;
		vcpu->rmode.tr.limit = var->limit;
		vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
		return;
	}
	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);
	if (vcpu->rmode.active && var->s) {
		/*
		 * Hack real-mode segments into vm86 compatibility.
		 */
		if (var->base == 0xffff0000 && var->selector == 0xf000)
			vmcs_writel(sf->base, 0xf0000);
		ar = 0xf3;
	} else
		ar = vmx_segment_access_rights(var);
	vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

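/*
 * Lay out the real-mode TSS in the top three pages of the first memory
 * slot: point the I/O map base (TSS offset 0x66) past the redirection
 * map, and terminate the I/O bitmap with an all-ones byte so vm86 I/O
 * permission checks behave.
 */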
static int init_rmode_tss(struct kvm *kvm)
{
	struct page *p1, *p2, *p3;
	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
	char *page;

	p1 = gfn_to_page(kvm, fn++);
	p2 = gfn_to_page(kvm, fn++);
	p3 = gfn_to_page(kvm, fn);

	if (!p1 || !p2 || !p3) {
		kvm_printf(kvm, "%s: gfn_to_page failed\n", __FUNCTION__);
		return 0;
	}

	page = kmap_atomic(p1, KM_USER0);
	clear_page(page);
	*(u16 *)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p2, KM_USER0);
	clear_page(page);
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p3, KM_USER0);
	clear_page(page);
	*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
	kunmap_atomic(page, KM_USER0);

	return 1;
}

static void seg_setup(int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0x93);
}

1364/*
1365 * Sets up the vmcs for emulated real mode.
1366 */
1367static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1368{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001369 struct vcpu_vmx *vmx = to_vmx(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001370 u32 host_sysenter_cs;
1371 u32 junk;
1372 unsigned long a;
1373 struct descriptor_table dt;
1374 int i;
1375 int ret = 0;
Avi Kivitycd2276a2007-05-14 20:41:13 +03001376 unsigned long kvm_vmx_return;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001377
1378 if (!init_rmode_tss(vcpu->kvm)) {
1379 ret = -ENOMEM;
1380 goto out;
1381 }
1382
1383 memset(vcpu->regs, 0, sizeof(vcpu->regs));
1384 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
1385 vcpu->cr8 = 0;
Avi Kivity94cea1b2007-06-13 19:43:19 +03001386 vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
Qing Hedad37952007-07-12 12:33:56 +03001387 if (vcpu->vcpu_id == 0)
Avi Kivity94cea1b2007-06-13 19:43:19 +03001388 vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001389
1390 fx_init(vcpu);
1391
1392 /*
1393 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
1394 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
1395 */
1396 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
1397 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
1398 vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1399 vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1400
1401 seg_setup(VCPU_SREG_DS);
1402 seg_setup(VCPU_SREG_ES);
1403 seg_setup(VCPU_SREG_FS);
1404 seg_setup(VCPU_SREG_GS);
1405 seg_setup(VCPU_SREG_SS);
1406
1407 vmcs_write16(GUEST_TR_SELECTOR, 0);
1408 vmcs_writel(GUEST_TR_BASE, 0);
1409 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
1410 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1411
1412 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
1413 vmcs_writel(GUEST_LDTR_BASE, 0);
1414 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
1415 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
1416
1417 vmcs_write32(GUEST_SYSENTER_CS, 0);
1418 vmcs_writel(GUEST_SYSENTER_ESP, 0);
1419 vmcs_writel(GUEST_SYSENTER_EIP, 0);
1420
1421 vmcs_writel(GUEST_RFLAGS, 0x02);
1422 vmcs_writel(GUEST_RIP, 0xfff0);
1423 vmcs_writel(GUEST_RSP, 0);
1424
Avi Kivity6aa8b732006-12-10 02:21:36 -08001425 //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
1426 vmcs_writel(GUEST_DR7, 0x400);
1427
1428 vmcs_writel(GUEST_GDTR_BASE, 0);
1429 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
1430
1431 vmcs_writel(GUEST_IDTR_BASE, 0);
1432 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
1433
1434 vmcs_write32(GUEST_ACTIVITY_STATE, 0);
1435 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
1436 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
1437
1438 /* I/O */
He, Qingfdef3ad2007-04-30 09:45:24 +03001439 vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
1440 vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001441
1442 guest_write_tsc(0);
1443
1444 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
1445
1446 /* Special registers */
1447 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
1448
1449 /* Control */
Yang, Sheng1c3d14f2007-07-29 11:07:42 +03001450 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
1451 vmcs_config.pin_based_exec_ctrl);
1452 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			vmcs_config.cpu_based_exec_ctrl);

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	get_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */

	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
	rdmsrl(MSR_IA32_SYSENTER_ESP, a);
	vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

	for (i = 0; i < NR_VMX_MSR; ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		u64 data;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		data = data_low | ((u64)data_high << 32);
		vmx->host_msrs[j].index = index;
		vmx->host_msrs[j].reserved = 0;
		vmx->host_msrs[j].data = data;
		vmx->guest_msrs[j] = vmx->host_msrs[j];
		++vmx->nmsrs;
	}

	setup_msrs(vcpu);

	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

#ifdef CONFIG_X86_64
	vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
	vmcs_writel(TPR_THRESHOLD, 0);
#endif

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

	vcpu->cr0 = 0x60000010;	/* architectural CR0 reset value: ET, NW, CD */
	vmx_set_cr0(vcpu, vcpu->cr0); /* enter real mode */
	vmx_set_cr4(vcpu, 0);
#ifdef CONFIG_X86_64
	vmx_set_efer(vcpu, 0);
#endif
	vmx_fpu_activate(vcpu);
	update_exception_bitmap(vcpu);

	return 0;

out:
	return ret;
}

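/*
 * Inject an interrupt while the guest is in real mode by doing what the
 * CPU would do: push FLAGS, CS and IP on the guest stack, clear IF/TF/AC,
 * and vector through the entry read from the IVT at irq * 4.
 */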
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
	u16 ent[2];
	u16 cs;
	u16 ip;
	unsigned long flags;
	unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
	u16 sp = vmcs_readl(GUEST_RSP);
	u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);

	if (sp > ss_limit || sp < 6) {
		vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_RSP),
			    vmcs_readl(GUEST_SS_BASE),
			    vmcs_read32(GUEST_SS_LIMIT));
		return;
	}

	if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
							sizeof(ent)) {
		vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
		return;
	}

	flags = vmcs_readl(GUEST_RFLAGS);
	cs = vmcs_readl(GUEST_CS_BASE) >> 4;
	ip = vmcs_readl(GUEST_RIP);

	if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
	    kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
	    kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
		vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
		return;
	}

	vmcs_writel(GUEST_RFLAGS, flags &
		    ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
	vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
	vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
	vmcs_writel(GUEST_RIP, ent[0]);
	vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
}

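/*
 * Pull the lowest-numbered pending IRQ out of the two-level irq_pending
 * bitmap and inject it: via the IVT in real mode, via the VMCS
 * entry-interruption field otherwise.
 */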
static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);

	if (vcpu->rmode.active) {
		inject_rmode_irq(vcpu, irq);
		return;
	}
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

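/*
 * Decide whether an interrupt can be injected right now, and program the
 * "interrupt window" exit: if something is pending while the guest has
 * interrupts blocked, ask the CPU to exit as soon as the guest can take it.
 */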
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	u32 cpu_based_vm_exec_control;

	vcpu->interrupt_window_open =
		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

	if (vcpu->interrupt_window_open &&
	    vcpu->irq_summary &&
	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
		/*
		 * Interrupts are enabled and not blocked by sti or
		 * mov ss: inject now.
		 */
		kvm_do_inject_irq(vcpu);

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
		/*
		 * Interrupts blocked.  Wait for unblock.
		 */
		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	else
		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

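/*
 * Load the user-supplied hardware breakpoints into DR0-DR3 and, for
 * single-stepping, set TF (trap after one instruction) and RF (don't
 * re-trigger a code breakpoint on the instruction being resumed).
 */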
static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;

	set_debugreg(dbg->bp[0], 0);
	set_debugreg(dbg->bp[1], 1);
	set_debugreg(dbg->bp[2], 2);
	set_debugreg(dbg->bp[3], 3);

	if (dbg->singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
		vmcs_writel(GUEST_RFLAGS, flags);
	}
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	if (!vcpu->rmode.active)
		return 0;

	/*
	 * An instruction with the address-size override prefix (0x67)
	 * causes an #SS fault with error code 0 in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
		if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
			return 1;
	return 0;
}

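/*
 * Exceptions and NMIs funnel through here: NMIs are reflected straight
 * back to the host, #NM re-enables the guest FPU, page faults go to the
 * shadow MMU (falling back to the emulator for MMIO), and faults taken
 * in real mode get a shot at the instruction emulator.
 */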
static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 intr_info, error_code;
	unsigned long cr2, rip;
	u32 vect_info;
	enum emulation_result er;
	int r;

	vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
						!is_page_fault(intr_info)) {
		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
	}

	if (is_external_interrupt(vect_info)) {
		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
		set_bit(irq, vcpu->irq_pending);
		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
	}

	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
		asm("int $2");
		return 1;
	}

	if (is_no_device(intr_info)) {
		vmx_fpu_activate(vcpu);
		return 1;
	}

	error_code = 0;
	rip = vmcs_readl(GUEST_RIP);
	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	if (is_page_fault(intr_info)) {
		cr2 = vmcs_readl(EXIT_QUALIFICATION);

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_mmu_page_fault(vcpu, cr2, error_code);
		if (r < 0) {
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		}
		if (!r) {
			mutex_unlock(&vcpu->kvm->lock);
			return 1;
		}

		er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
		mutex_unlock(&vcpu->kvm->lock);

		switch (er) {
		case EMULATE_DONE:
			return 1;
		case EMULATE_DO_MMIO:
			++vcpu->stat.mmio_exits;
			return 0;
		case EMULATE_FAIL:
			vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
			break;
		default:
			BUG();
		}
	}

	if (vcpu->rmode.active &&
	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
								error_code)) {
		if (vcpu->halt_request) {
			vcpu->halt_request = 0;
			return kvm_emulate_halt(vcpu);
		}
		return 1;
	}

	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK))
	    == (INTR_TYPE_EXCEPTION | 1)) { /* #DB */
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		return 0;
	}
	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
	kvm_run->ex.error_code = error_code;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
				     struct kvm_run *kvm_run)
{
	++vcpu->stat.irq_exits;
	return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

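/*
 * For a REP string I/O instruction, work out the repeat count held in
 * rCX: fetch the instruction bytes, skip legacy prefixes, and use the
 * CS attributes (and any 0x67 prefix) to find the address size that
 * masks the count.
 */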
static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
{
	u64 inst;
	gva_t rip;
	int countr_size;
	int i, n;

	if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
		countr_size = 2;
	} else {
		u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);

		countr_size = (cs_ar & AR_L_MASK) ? 8 :
			      (cs_ar & AR_DB_MASK) ? 4 : 2;
	}

	rip = vmcs_readl(GUEST_RIP);
	if (countr_size != 8)
		rip += vmcs_readl(GUEST_CS_BASE);

	n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);

	for (i = 0; i < n; i++) {
		switch (((u8*)&inst)[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x2e:
		case 0x36:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x66:
			break;
		case 0x67:
			countr_size = (countr_size == 2) ? 4 : (countr_size >> 1);
			/* fall through */
		default:
			goto done;
		}
	}
	return 0;
done:
	countr_size *= 8;
	*count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
	return 1;
}

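/*
 * Decode a port I/O exit from the exit qualification: bits 2:0 give the
 * access size minus one, bit 3 the direction (in/out), bit 4 a string
 * operation, bit 5 a REP prefix, and bits 31:16 the port number; then
 * hand the whole thing to the generic PIO emulation.
 */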
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	int size, down, in, string, rep;
	unsigned port;
	unsigned long count;
	gva_t address;

	++vcpu->stat.io_exits;
	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	in = (exit_qualification & 8) != 0;
	size = (exit_qualification & 7) + 1;
	string = (exit_qualification & 16) != 0;
	down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
	count = 1;
	rep = (exit_qualification & 32) != 0;
	port = exit_qualification >> 16;
	address = 0;
	if (string) {
		if (rep && !get_io_count(vcpu, &count))
			return 1;
		address = vmcs_readl(GUEST_LINEAR_ADDRESS);
	}
	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
			     address, rep, port);
}

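/*
 * 0f 01 c1 is the VMCALL opcode; the trailing c3 is a ret so the patched
 * hypercall site returns straight to its caller.
 */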
static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
	hypercall[3] = 0xc3;
}

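/*
 * Control register accesses the guest may not perform directly trap
 * here; the exit qualification encodes which CR, which general register,
 * and whether this was a mov to/from, clts or lmsw.
 */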
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	int cr;
	int reg;

	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		switch (cr) {
		case 0:
			vcpu_load_rsp_rip(vcpu);
			set_cr0(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		case 3:
			vcpu_load_rsp_rip(vcpu);
			set_cr3(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		case 4:
			vcpu_load_rsp_rip(vcpu);
			set_cr4(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			vcpu_load_rsp_rip(vcpu);
			set_cr8(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		break;
	case 2: /* clts */
		vcpu_load_rsp_rip(vcpu);
		vmx_fpu_deactivate(vcpu);
		vcpu->cr0 &= ~X86_CR0_TS;
		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
		vmx_fpu_activate(vcpu);
		skip_emulated_instruction(vcpu);
		return 1;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			vcpu_load_rsp_rip(vcpu);
			vcpu->regs[reg] = vcpu->cr3;
			vcpu_put_rsp_rip(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			vcpu_load_rsp_rip(vcpu);
			vcpu->regs[reg] = vcpu->cr8;
			vcpu_put_rsp_rip(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		break;
	case 3: /* lmsw */
		lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);

		skip_emulated_instruction(vcpu);
		return 1;
	default:
		break;
	}
	kvm_run->exit_reason = 0;
	printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
	       (int)(exit_qualification >> 4) & 3, cr);
	return 0;
}

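/*
 * While the host owns the hardware debug registers, guest reads of
 * DR6/DR7 just return their architectural reset values and guest writes
 * are dropped.
 */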
static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	unsigned long val;
	int dr, reg;

	/*
	 * FIXME: this code assumes the host is debugging the guest.
	 *        need to deal with guest debugging itself too.
	 */
	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	dr = exit_qualification & 7;
	reg = (exit_qualification >> 8) & 15;
	vcpu_load_rsp_rip(vcpu);
	if (exit_qualification & 16) {
		/* mov from dr */
		switch (dr) {
		case 6:
			val = 0xffff0ff0;
			break;
		case 7:
			val = 0x400;
			break;
		default:
			val = 0;
		}
		vcpu->regs[reg] = val;
	} else {
		/* mov to dr: ignored */
	}
	vcpu_put_rsp_rip(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_emulate_cpuid(vcpu);
	return 1;
}

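/*
 * RDMSR/WRMSR pass the MSR index in ECX and split the 64-bit value
 * across EDX:EAX; a rejected access injects #GP(0) instead.
 */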
static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (vmx_get_msr(vcpu, ecx, &data)) {
		vmx_inject_gp(vcpu, 0);
		return 1;
	}

	/* FIXME: handling of bits 32:63 of rax, rdx */
	vcpu->regs[VCPU_REGS_RAX] = data & -1u;
	vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);

	if (vmx_set_msr(vcpu, ecx, data) != 0) {
		vmx_inject_gp(vcpu, 0);
		return 1;
	}

	skip_emulated_instruction(vcpu);
	return 1;
}

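/*
 * Mirror the interrupt state into the kvm_run area so userspace can
 * tell when it is allowed to inject an interrupt itself.
 */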
static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
}

static int handle_interrupt_window(struct kvm_vcpu *vcpu,
				   struct kvm_run *kvm_run)
{
	/*
	 * If userspace is waiting to inject an interrupt, exit to it as
	 * soon as possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		++vcpu->stat.irq_window_exits;
		return 0;
	}
	return 1;
}

static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	skip_emulated_instruction(vcpu);
	return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}

/*
 * The exit handlers return 1 if the exit was handled fully and guest
 * execution may resume.  Otherwise they set the kvm_run parameter to
 * indicate what needs to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run) = {
	[EXIT_REASON_EXCEPTION_NMI]      = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]       = handle_triple_fault,
	[EXIT_REASON_IO_INSTRUCTION]     = handle_io,
	[EXIT_REASON_CR_ACCESS]          = handle_cr,
	[EXIT_REASON_DR_ACCESS]          = handle_dr,
	[EXIT_REASON_CPUID]              = handle_cpuid,
	[EXIT_REASON_MSR_READ]           = handle_rdmsr,
	[EXIT_REASON_MSR_WRITE]          = handle_wrmsr,
	[EXIT_REASON_PENDING_INTERRUPT]  = handle_interrupt_window,
	[EXIT_REASON_HLT]                = handle_halt,
	[EXIT_REASON_VMCALL]             = handle_vmcall,
};

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);

	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    exit_reason != EXIT_REASON_EXCEPTION_NMI)
		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}

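/*
 * Nothing to do here: without VPID support (not used by this driver),
 * VM entries and exits should already flush the guest's TLB entries.
 */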
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
}

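/*
 * The heart of the exit loop: inject pending events, switch to the
 * guest with vmlaunch/vmresume, dispatch the resulting exit, and go
 * around again until the guest needs userspace (or a signal arrives).
 */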
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u8 fail;
	int r;

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	preempt_disable();

	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	vmx_save_host_state(vcpu);
	kvm_load_guest_fpu(vcpu);

	/*
	 * Loading guest fpu may have cleared host cr0.ts
	 */
	vmcs_writel(HOST_CR0, read_cr0());

	local_irq_disable();

	vcpu->guest_mode = 1;
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
			vmx_flush_tlb(vcpu);

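	/*
	 * World switch: save the host registers on the stack and stash
	 * the stack pointer in HOST_RSP, load the guest registers from
	 * the vcpu, then vmlaunch (first entry) or vmresume.  Control
	 * returns at .Lkvm_vmx_return, where the guest registers are
	 * written back and setbe records a failed entry.
	 */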
	asm(
		/* Store host registers */
#ifdef CONFIG_X86_64
		"push %%rax; push %%rbx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
		"push %%rcx \n\t"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#else
		"pusha; push %%ecx \n\t"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#endif
		/* Check if vmlaunch or vmresume is needed */
		"cmp $0, %1 \n\t"
		/* Load guest registers.  Don't clobber flags. */
#ifdef CONFIG_X86_64
		"mov %c[cr2](%3), %%rax \n\t"
		"mov %%rax, %%cr2 \n\t"
		"mov %c[rax](%3), %%rax \n\t"
		"mov %c[rbx](%3), %%rbx \n\t"
		"mov %c[rdx](%3), %%rdx \n\t"
		"mov %c[rsi](%3), %%rsi \n\t"
		"mov %c[rdi](%3), %%rdi \n\t"
		"mov %c[rbp](%3), %%rbp \n\t"
		"mov %c[r8](%3),  %%r8  \n\t"
		"mov %c[r9](%3),  %%r9  \n\t"
		"mov %c[r10](%3), %%r10 \n\t"
		"mov %c[r11](%3), %%r11 \n\t"
		"mov %c[r12](%3), %%r12 \n\t"
		"mov %c[r13](%3), %%r13 \n\t"
		"mov %c[r14](%3), %%r14 \n\t"
		"mov %c[r15](%3), %%r15 \n\t"
		"mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
#else
		"mov %c[cr2](%3), %%eax \n\t"
		"mov %%eax, %%cr2 \n\t"
		"mov %c[rax](%3), %%eax \n\t"
		"mov %c[rbx](%3), %%ebx \n\t"
		"mov %c[rdx](%3), %%edx \n\t"
		"mov %c[rsi](%3), %%esi \n\t"
		"mov %c[rdi](%3), %%edi \n\t"
		"mov %c[rbp](%3), %%ebp \n\t"
		"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
		/* Enter guest mode */
		"jne .Llaunched \n\t"
		ASM_VMX_VMLAUNCH "\n\t"
		"jmp .Lkvm_vmx_return \n\t"
		".Llaunched: " ASM_VMX_VMRESUME "\n\t"
		".Lkvm_vmx_return: "
		/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
		"xchg %3, (%%rsp) \n\t"
		"mov %%rax, %c[rax](%3) \n\t"
		"mov %%rbx, %c[rbx](%3) \n\t"
		"pushq (%%rsp); popq %c[rcx](%3) \n\t"
		"mov %%rdx, %c[rdx](%3) \n\t"
		"mov %%rsi, %c[rsi](%3) \n\t"
		"mov %%rdi, %c[rdi](%3) \n\t"
		"mov %%rbp, %c[rbp](%3) \n\t"
		"mov %%r8,  %c[r8](%3)  \n\t"
		"mov %%r9,  %c[r9](%3)  \n\t"
		"mov %%r10, %c[r10](%3) \n\t"
		"mov %%r11, %c[r11](%3) \n\t"
		"mov %%r12, %c[r12](%3) \n\t"
		"mov %%r13, %c[r13](%3) \n\t"
		"mov %%r14, %c[r14](%3) \n\t"
		"mov %%r15, %c[r15](%3) \n\t"
		"mov %%cr2, %%rax \n\t"
		"mov %%rax, %c[cr2](%3) \n\t"
		"mov (%%rsp), %3 \n\t"

		"pop %%rcx; pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rbx; pop %%rax \n\t"
#else
		"xchg %3, (%%esp) \n\t"
		"mov %%eax, %c[rax](%3) \n\t"
		"mov %%ebx, %c[rbx](%3) \n\t"
		"pushl (%%esp); popl %c[rcx](%3) \n\t"
		"mov %%edx, %c[rdx](%3) \n\t"
		"mov %%esi, %c[rsi](%3) \n\t"
		"mov %%edi, %c[rdi](%3) \n\t"
		"mov %%ebp, %c[rbp](%3) \n\t"
		"mov %%cr2, %%eax \n\t"
		"mov %%eax, %c[cr2](%3) \n\t"
		"mov (%%esp), %3 \n\t"

		"pop %%ecx; popa \n\t"
#endif
		"setbe %0 \n\t"
	      : "=q" (fail)
	      : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
		"c"(vcpu),
		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
		[r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
	      : "cc", "memory");

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	vcpu->interrupt_window_open =
		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;

	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
	vmx->launched = 1;

	preempt_enable();

	if (unlikely(fail)) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		r = 0;
		goto out;
	}
	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));

	r = kvm_handle_exit(kvm_run, vcpu);
	if (r > 0) {
		/* Give the scheduler a chance to reschedule. */
		if (signal_pending(current)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
			goto out;
		}

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);
	return r;
}

static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  u32 err_code)
{
	u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	++vcpu->stat.pf_guest;

	if (is_page_fault(vect_info)) {
		printk(KERN_DEBUG "inject_page_fault: "
		       "double fault 0x%lx @ 0x%lx\n",
		       addr, vmcs_readl(GUEST_RIP));
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     DF_VECTOR |
			     INTR_TYPE_EXCEPTION |
			     INTR_INFO_DELIEVER_CODE_MASK |
			     INTR_INFO_VALID_MASK);
		return;
	}
	vcpu->cr2 = addr;
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     PF_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);
}

static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->vmcs) {
		on_each_cpu(__vcpu_clear, vcpu, 0, 1);
		free_vmcs(vmx->vmcs);
		vmx->vmcs = NULL;
	}
}

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx_free_vmcs(vcpu);
	kfree(vmx->host_msrs);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
	kfree(vmx);
}

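/*
 * Allocate a vcpu, its MSR save areas and a VMCS, then load the vcpu on
 * a CPU just long enough to initialize the VMCS; the error paths unwind
 * the allocations in reverse order.
 */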
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
	int err;
	struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
	int cpu;

	if (!vmx)
		return ERR_PTR(-ENOMEM);

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->guest_msrs) {
		err = -ENOMEM;
		goto uninit_vcpu;
	}

	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->host_msrs) {
		err = -ENOMEM;
		goto free_guest_msrs;
	}

	vmx->vmcs = alloc_vmcs();
	if (!vmx->vmcs) {
		err = -ENOMEM;
		goto free_msrs;
	}

	vmcs_clear(vmx->vmcs);

	cpu = get_cpu();
	vmx_vcpu_load(&vmx->vcpu, cpu);
	err = vmx_vcpu_setup(&vmx->vcpu);
	vmx_vcpu_put(&vmx->vcpu);
	put_cpu();
	if (err)
		goto free_vmcs;

	return &vmx->vcpu;

free_vmcs:
	free_vmcs(vmx->vmcs);
free_msrs:
	kfree(vmx->host_msrs);
free_guest_msrs:
	kfree(vmx->guest_msrs);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	kfree(vmx);
	return ERR_PTR(err);
}

static struct kvm_arch_ops vmx_arch_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,

	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,
	.vcpu_decache = vmx_vcpu_decache,

	.set_guest_debug = set_guest_debug,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
#ifdef CONFIG_X86_64
	.set_efer = vmx_set_efer,
#endif
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.cache_regs = vcpu_load_rsp_rip,
	.decache_regs = vcpu_put_rsp_rip,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,
	.inject_page_fault = vmx_inject_page_fault,

	.inject_gp = vmx_inject_gp,

	.run = vmx_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = vmx_patch_hypercall,
};

static int __init vmx_init(void)
{
	void *iova;
	int r;

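	/*
	 * Each I/O bitmap page covers 32K ports (bitmap A for ports
	 * 0x0000-0x7fff, bitmap B for 0x8000-0xffff); a set bit forces
	 * a VM exit for that port, so start by trapping everything.
	 */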
	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_b) {
		r = -ENOMEM;
		goto out;
	}

	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
	iova = kmap(vmx_io_bitmap_a);
	memset(iova, 0xff, PAGE_SIZE);
	clear_bit(0x80, iova);
	kunmap(vmx_io_bitmap_a);

	iova = kmap(vmx_io_bitmap_b);
	memset(iova, 0xff, PAGE_SIZE);
	kunmap(vmx_io_bitmap_b);

	r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
	if (r)
		goto out1;

	return 0;

out1:
	__free_page(vmx_io_bitmap_b);
out:
	__free_page(vmx_io_bitmap_a);
	return r;
}

static void __exit vmx_exit(void)
{
	__free_page(vmx_io_bitmap_b);
	__free_page(vmx_io_bitmap_a);

	kvm_exit_arch();
}

module_init(vmx_init)
module_exit(vmx_exit)