/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* Turn on to get debugging output */
/* #define NESTED_DEBUG */

#ifdef NESTED_DEBUG
#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
#else
#define nsvm_printk(fmt, args...) do {} while (0)
#endif

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static void kvm_reput_irq(struct vcpu_svm *svm);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->arch.irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
}

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}

static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

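/*
 * The MSR permission map uses two bits per MSR: the low bit intercepts
 * reads, the high bit intercepts writes.  Each 2K-byte chunk of the map
 * covers the 8192 MSRs starting at the corresponding msrpm_ranges[] base.
 */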
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

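/*
 * Global setup: allocate the I/O permission map (all ports intercepted
 * except the PC debug port), initialize every online CPU and decide
 * whether nested paging can be used.
 */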
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

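/*
 * Build the initial VMCB: intercept the control-register, debug-register,
 * exception and instruction events that KVM must mediate, then load the
 * segment and control-register state of an x86 CPU after reset.
 */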
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK |
				      INTERCEPT_CR8_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);

	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_INVD) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPG) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT) |
			     (1ULL << INTERCEPT_WBINVD) |
			     (1ULL << INTERCEPT_MONITOR) |
			     (1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = EFER_SVME;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * The CR0 value at CPU INIT should be 0x60000010; we enable the CPU
	 * cache by default.  The orderly way would be to enable the cache in
	 * the BIOS.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK |
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK |
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);

	svm->vcpu.arch.hflags = HF_GIF_MASK;
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (vcpu->vcpu_id != 0) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

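/*
 * Allocate the VCPU together with its VMCB page and a private MSR
 * permission map, then seed the VMCB with reset state.
 */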
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;
	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

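/*
 * On migration to a new physical CPU, adjust tsc_offset so the guest TSC
 * never appears to jump backwards, then stash the host values of the MSRs
 * that must be swapped manually around guest execution.
 */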
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * SVM always stores 0 for the 'G' bit in the CS selector in
	 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
	 * Intel's VMENTRY has a check on the 'G' bit.
	 */
	if (seg == VCPU_SREG_CS)
		var->g = s->limit > 0xfffff;

	/*
	 * Work around a bug where the busy flag in the tr selector
	 * isn't exposed
	 */
	if (seg == VCPU_SREG_TR)
		var->type |= 0x2;

	var->unusable = !var->present;
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

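/*
 * CR0 writes track long-mode transitions (EFER.LMA follows CR0.PG while
 * EFER.LME is set) and, without nested paging, keep PG and WP forced on
 * so the shadow MMU stays in control of guest translations.
 */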
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

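/*
 * ASIDs tag guest TLB entries.  When the per-CPU pool is exhausted, start
 * a new generation and have the hardware flush all ASIDs on the next
 * VMRUN so that no stale translations survive ASID reuse.
 */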
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	unsigned long val = to_svm(vcpu)->db_regs[dr];
	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

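/*
 * #PF intercept: exit_info_2 holds the fault address (a guest virtual
 * address under shadow paging, a guest physical address under nested
 * paging) and exit_info_1 the page-fault error code.
 */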
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	bool event_injection = false;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info)) {
		event_injection = true;
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
	}

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);

	if (!npt_enabled && event_injection)
		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

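/*
 * IN/OUT intercept: port, size, direction and REP prefix are decoded from
 * exit_info_1; string variants are handed to the instruction emulator.
 */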
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

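/*
 * The SVM instructions (VMRUN, STGI, CLGI, ...) are only legal for a guest
 * that has EFER.SVME set, paging enabled and is running at CPL 0.
 */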
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	if (is_error_page(page)) {
		printk(KERN_INFO "%s: could not find page at 0x%llx\n",
		       __func__, gpa);
		kvm_release_page_clean(page);
		kvm_inject_gp(&svm->vcpu, 0);
		return NULL;
	}
	return page;
}

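/*
 * Map up to two guest pages and hand them to @handler.  The pages are
 * mapped with kmap_atomic (KM_USER0/KM_USER1), so the handler runs in
 * atomic context and must not sleep.
 */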
static int nested_svm_do(struct vcpu_svm *svm,
			 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
			 int (*handler)(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque))
{
	struct page *arg1_page;
	struct page *arg2_page = NULL;
	void *arg1;
	void *arg2 = NULL;
	int retval;

	arg1_page = nested_svm_get_page(svm, arg1_gpa);
	if (arg1_page == NULL)
		return 1;

	if (arg2_gpa) {
		arg2_page = nested_svm_get_page(svm, arg2_gpa);
		if (arg2_page == NULL) {
			kvm_release_page_clean(arg1_page);
			return 1;
		}
	}

	arg1 = kmap_atomic(arg1_page, KM_USER0);
	if (arg2_gpa)
		arg2 = kmap_atomic(arg2_page, KM_USER1);

	retval = handler(svm, arg1, arg2, opaque);

	kunmap_atomic(arg1, KM_USER0);
	if (arg2_gpa)
		kunmap_atomic(arg2, KM_USER1);

	kvm_release_page_dirty(arg1_page);
	if (arg2_gpa)
		kvm_release_page_dirty(arg2_page);

	return retval;
}

static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	svm->vcpu.arch.hflags |= HF_GIF_MASK;

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_IRET);
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_JMP);
	return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

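/*
 * Most MSR reads come straight from the VMCB save area; the TSC is
 * special-cased so the guest sees the host TSC plus the VMCB tsc_offset.
 */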
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	/* Nobody will change the following 5 values in the VMCB so
	   we can safely return them on rdmsr. They will always be 0
	   until LBRV is implemented. */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		kvm_inject_gp(&svm->vcpu, 0);
	else {
		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
			    (u32)(data >> 32), handler);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

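/*
 * Write a guest MSR.  Note the TSC case: the VMCB stores an *offset* that
 * the hardware adds to the host TSC while the guest runs, so setting the
 * guest TSC to 'data' means recording (data - host_tsc) as the offset.
 */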
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!svm_has(SVM_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
					__func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
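		/* Bit 0 of DEBUGCTL is the LBR (last-branch-record) enable bit. */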
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		/*
		 * Just discard all writes to the performance counters; this
		 * should keep both older linux and windows 64-bit guests
		 * happy
		 */
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);

		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
		    handler);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}

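/*
 * On an MSR intercept, exit_info_1 distinguishes the direction:
 * 1 for WRMSR, 0 for RDMSR (per the AMD APM's #VMEXIT information).
 */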
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}

static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);

	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !svm->vcpu.arch.irq_summary) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

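/*
 * Exit-reason dispatch table, indexed directly by the SVM exit code from
 * the VMCB.  Handlers return 1 to resume the guest or 0 to drop back to
 * userspace with kvm_run->exit_reason filled in.
 */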
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};

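/*
 * Top-level #VMEXIT handler.  With NPT the guest writes CR0/CR3 without
 * intercepts, so the shadowed values are synced back here (reloading the
 * MMU and the PAE PDPTRs when paging state changed) before the exit code
 * is dispatched through svm_exit_handlers[].
 */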
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);

	if (npt_enabled) {
		int mmu_reload = 0;
		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
		vcpu->arch.cr3 = svm->vmcb->save.cr3;
		if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
			if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
				kvm_inject_gp(vcpu, 0);
				return 1;
			}
		}
		if (mmu_reload) {
			kvm_mmu_reset_context(vcpu);
			kvm_mmu_load(vcpu);
		}
	}

	kvm_reput_irq(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

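/*
 * Per-run bookkeeping before VMRUN: if the vcpu has moved to a different
 * physical CPU, or this CPU's ASID generation has rolled over, allocate a
 * fresh ASID so no stale TLB entries can be reused.
 */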
static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->vcpu.cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

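/*
 * Inject an interrupt via the VMCB's virtual interrupt (V_IRQ) fields.
 * The priority is pinned at 0xf; the commented-out expression below hints
 * that deriving it from the vector (vector >> 4) was once intended.
 */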
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);

	++svm->vcpu.stat.irq_injections;
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_inject_irq(svm, irq);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int max_irr, tpr;

	if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
		return;

	vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;

	max_irr = kvm_lapic_find_highest_irr(vcpu);
	if (max_irr == -1)
		return;

	tpr = kvm_lapic_get_cr8(vcpu) << 4;

	if (tpr >= (max_irr & 0xf0))
		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

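/*
 * Called before entry to decide whether an interrupt can be injected now.
 * Ordering matters: first re-inject an interrupt that the last #VMEXIT cut
 * short, then bail out if one is already pending, none is waiting, or GIF
 * is clear.  If delivery is blocked (IF clear, interrupt shadow, or an
 * event already queued), arm the V_INTR intercept so we exit again as soon
 * as the window opens.
 */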
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int intr_vector = -1;

	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
		intr_vector = vmcb->control.exit_int_info &
			      SVM_EVTINJ_VEC_MASK;
		vmcb->control.exit_int_info = 0;
		svm_inject_irq(svm, intr_vector);
		goto out;
	}

	if (vmcb->control.int_ctl & V_IRQ_MASK)
		goto out;

	if (!kvm_cpu_has_interrupt(vcpu))
		goto out;

	if (!(svm->vcpu.arch.hflags & HF_GIF_MASK))
		goto out;

	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
		/* unable to deliver irq, set pending irq */
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
		goto out;
	}
	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
	intr_vector = kvm_cpu_get_interrupt(vcpu);
	svm_inject_irq(svm, intr_vector);
out:
	update_cr8_intercept(vcpu);
}

static void kvm_reput_irq(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

	if ((control->int_ctl & V_IRQ_MASK)
	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(&svm->vcpu, control->int_vector);
	}

	svm->vcpu.arch.interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static void svm_do_inject_vector(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	svm_inject_irq(svm, irq);
}

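/*
 * Userspace-irqchip counterpart of svm_intr_assist: compute whether the
 * interrupt window is open (no interrupt shadow, IF set, GIF set) and
 * either inject the highest pending vector or arm the V_INTR intercept to
 * wait for the window.
 */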
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	svm->vcpu.arch.interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
		 (svm->vcpu.arch.hflags & HF_GIF_MASK));

	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
		/*
		 * Interrupts are enabled and not blocked by an sti/mov ss
		 * shadow, so the pending vector can be injected directly.
		 */
		svm_do_inject_vector(svm);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!svm->vcpu.arch.interrupt_window_open &&
	    (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
		svm_set_vintr(svm);
	else
		svm_clear_vintr(svm);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

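/*
 * DR0-DR3 are not switched by VMRUN/VMSAVE (the VMCB save area only holds
 * DR6/DR7), so they are saved and restored by hand around guest execution
 * whenever the guest's DR7 enables any breakpoints (the dr7 & 0xff check
 * in svm_vcpu_run).
 */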
static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

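/*
 * TPR synchronization: the guest may update CR8 through V_TPR without an
 * intercept, so after each run the lapic's view is refreshed from int_ctl,
 * and before each run V_TPR is seeded from the lapic's CR8 value.
 */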
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_lapic_set_tpr(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

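/*
 * The world switch itself.  Host state that VMRUN does not preserve
 * (segment selectors, debug registers, CR2, MSRs) is saved manually; the
 * inline asm then loads the guest GPRs, executes VMLOAD/VMRUN/VMSAVE with
 * %rax pointing at the VMCB, and stores the guest GPRs back before host
 * state is restored.
 */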
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	save_host_msrs(vcpu);
	fs_selector = kvm_read_fs();
	gs_selector = kvm_read_gs();
	ldt_selector = kvm_read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->arch.cr2;
	/* required for live migration with NPT */
	if (npt_enabled)
		svm->vmcb->save.cr3 = vcpu->arch.cr3;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
		);

	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	kvm_load_fs(fs_selector);
	kvm_load_gs(gs_selector);
	kvm_load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;
}

#undef R

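/*
 * With NPT the new root goes into nested_cr3 (the guest manages its own
 * CR3); without it, CR3 is shadowed directly in the VMCB save area.
 * Either way the ASID is recycled so stale translations are flushed.
 */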
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
		return;
	}

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

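/*
 * 0f 01 d9 is the encoding of VMMCALL; generic KVM code calls this hook
 * so the guest's hypercall site can be rewritten with the vendor
 * instruction.
 */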
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static int svm_get_mt_mask_shift(void)
{
	return 0;
}

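/*
 * The vendor-neutral KVM core drives all of the above through this ops
 * table; it is handed to kvm_init() at module load below.
 */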
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
	.queue_exception = svm_queue_exception,
	.exception_injected = svm_exception_injected,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask_shift = svm_get_mt_mask_shift,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)