/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static void kvm_reput_irq(struct vcpu_svm *svm);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

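/*
 * pop_irq()/push_irq() operate on the vcpu's software-pending interrupt
 * bitmap: irq_summary carries one bit per word of irq_pending, so the
 * lowest pending vector can be located with two __ffs() lookups.
 */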
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->arch.irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
}

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

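/*
 * Advance the guest RIP past the intercepted instruction.  The intercept
 * handler is expected to have set svm->next_rip beforehand; if it did not,
 * log and leave RIP untouched.  Completing the instruction also clears the
 * interrupt shadow and reopens the interrupt window.
 */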
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->arch.interrupt_window_open = 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}

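/*
 * Per-cpu enable of SVM: verify CPU support, reset the per-cpu ASID
 * allocator, locate the host TSS descriptor in the GDT, set EFER.SVME and
 * point MSR_VM_HSAVE_PA at this cpu's host save area.
 */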
static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;

}

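/*
 * The MSR permission map uses two bits per MSR (bit 0: intercept reads,
 * bit 1: intercept writes).  Find the range containing @msr, compute its
 * offset in the map and update the two intercept bits; passing 1 for
 * read or write means "do not intercept".
 */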
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

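/*
 * Module-wide setup: allocate and fill the I/O permission map (all ports
 * intercepted except the PC debug port), allow EFER.NX if the host has it,
 * initialize the per-cpu data, read the SVM feature bits and decide
 * whether Nested Paging can be used.
 */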
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

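/*
 * Set up the initial VMCB for a vcpu: program the CR/DR/exception and
 * instruction intercepts, point the control area at the I/O and MSR
 * permission maps, and load segment/control register state matching the
 * x86 reset value.  With Nested Paging the CR3 and #PF intercepts are
 * dropped, since the hardware walks the guest page tables itself.
 */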
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK |
					INTERCEPT_CR8_MASK;

	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);


	control->intercept =	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_INVD) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPG) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_WBINVD) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = EFER_SVME;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (vcpu->vcpu_id != 0) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;
	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

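/*
 * When a vcpu is migrated to a different physical cpu, adjust the VMCB
 * TSC offset so the guest-visible TSC never goes backwards, then save the
 * host values of host_save_user_msrs so svm_vcpu_put() can restore them.
 */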
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * SVM always stores 0 for the 'G' bit in the CS selector in
	 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
	 * Intel's VMENTRY has a check on the 'G' bit.
	 */
	if (seg == VCPU_SREG_CS)
		var->g = s->limit > 0xfffff;

	/*
	 * Work around a bug where the busy flag in the tr selector
	 * isn't exposed
	 */
	if (seg == VCPU_SREG_TR)
		var->type |= 0x2;

	var->unusable = !var->present;
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

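/*
 * Propagate a guest CR0 write into the VMCB.  Under EFER.LME this is also
 * where long mode is activated/deactivated as paging is turned on or off.
 * Without Nested Paging, the shadow MMU requires CR0.PG and CR0.WP to stay
 * set in the real CR0, and CR0.TS is used to switch the FPU lazily.
 */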
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;

}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

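/*
 * Hand out the next ASID from this cpu's pool.  When the pool is
 * exhausted, start a new generation and request a full TLB/ASID flush on
 * the next vmrun.
 */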
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	unsigned long val = to_svm(vcpu)->db_regs[dr];
	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

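/*
 * #PF (or, with Nested Paging, NPF) intercept: the faulting address is
 * reported in exit_info_2 and the page-fault error code in exit_info_1;
 * both are handed to the common MMU code.  If an external interrupt was
 * being delivered when the fault hit, it is pushed back first.
 */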
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	bool event_injection = false;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info)) {
		event_injection = true;
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
	}

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);

	if (!npt_enabled && event_injection)
		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

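/*
 * IOIO intercept: decode the port, direction, operand size and rep prefix
 * from exit_info_1.  String I/O is punted to the instruction emulator;
 * simple ins/outs go through kvm_emulate_pio().
 */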
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_IRET);
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_JMP);
	return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

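/*
 * Intercepted MSR reads are served from the VMCB save area (STAR, LSTAR,
 * SYSENTER, debug/LBR registers, TSC with its offset applied); anything
 * not handled here falls through to kvm_get_msr_common().
 */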
Avi Kivity6aa8b732006-12-10 02:21:36 -08001207static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1208{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001209 struct vcpu_svm *svm = to_svm(vcpu);
1210
Avi Kivity6aa8b732006-12-10 02:21:36 -08001211 switch (ecx) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001212 case MSR_IA32_TIME_STAMP_COUNTER: {
1213 u64 tsc;
1214
1215 rdtscll(tsc);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001216 *data = svm->vmcb->control.tsc_offset + tsc;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001217 break;
1218 }
Avi Kivity0e859ca2006-12-22 01:05:08 -08001219 case MSR_K6_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001220 *data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001221 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08001222#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001223 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001224 *data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001225 break;
1226 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001227 *data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001228 break;
1229 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001230 *data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001231 break;
1232 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001233 *data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001234 break;
1235#endif
1236 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001237 *data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001238 break;
1239 case MSR_IA32_SYSENTER_EIP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001240 *data = svm->vmcb->save.sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001241 break;
1242 case MSR_IA32_SYSENTER_ESP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001243 *data = svm->vmcb->save.sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001244 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01001245 /* Nobody will change the following 5 values in the VMCB so
1246 we can safely return them on rdmsr. They will always be 0
1247 until LBRV is implemented. */
1248 case MSR_IA32_DEBUGCTLMSR:
1249 *data = svm->vmcb->save.dbgctl;
1250 break;
1251 case MSR_IA32_LASTBRANCHFROMIP:
1252 *data = svm->vmcb->save.br_from;
1253 break;
1254 case MSR_IA32_LASTBRANCHTOIP:
1255 *data = svm->vmcb->save.br_to;
1256 break;
1257 case MSR_IA32_LASTINTFROMIP:
1258 *data = svm->vmcb->save.last_excp_from;
1259 break;
1260 case MSR_IA32_LASTINTTOIP:
1261 *data = svm->vmcb->save.last_excp_to;
1262 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001263 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08001264 return kvm_get_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001265 }
1266 return 0;
1267}
1268
Rusty Russelle756fc62007-07-30 20:07:08 +10001269static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001270{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001271 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Avi Kivity6aa8b732006-12-10 02:21:36 -08001272 u64 data;
1273
Rusty Russelle756fc62007-07-30 20:07:08 +10001274 if (svm_get_msr(&svm->vcpu, ecx, &data))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02001275 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001276 else {
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001277 KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
1278 (u32)(data >> 32), handler);
1279
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001280 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001281 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001282 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001283 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001284 }
1285 return 1;
1286}
1287
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!svm_has(SVM_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
					__func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		/*
		 * Just discard all writes to the performance counters; this
		 * should keep both older linux and windows 64-bit guests
		 * happy
		 */
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);

		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

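/*
 * WRMSR exit handler: reassemble the 64-bit value from the guest's
 * EDX:EAX and hand it to svm_set_msr().  A rejected write injects #GP
 * instead of skipping the instruction.
 */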
static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
		    handler);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}

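/*
 * RDMSR and WRMSR share a single SVM exit code; exit_info_1 is
 * non-zero for writes and zero for reads.
 */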
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}

static int interrupt_window_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);

	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If user space is waiting to inject interrupts, exit as soon
	 * as possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !svm->vcpu.arch.irq_summary) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

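/*
 * Exit handlers indexed by SVM exit code.  Most CR/DR accesses are fed
 * to the instruction emulator, SVM instructions the guest must not use
 * are rejected, and exit codes without an entry are reported to user
 * space as KVM_EXIT_UNKNOWN by handle_exit().
 */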
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};

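/*
 * Top-level #VMEXIT handler.  With nested paging the guest's CR0/CR3
 * are not intercepted, so their current values are copied back from
 * the VMCB here (reloading the MMU and, for PAE guests, the PDPTEs)
 * before the exit is dispatched through svm_exit_handlers[].
 */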
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);

	if (npt_enabled) {
		int mmu_reload = 0;
		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
		vcpu->arch.cr3 = svm->vmcb->save.cr3;
		if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
			if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
				kvm_inject_gp(vcpu, 0);
				return 1;
			}
		}
		if (mmu_reload) {
			kvm_mmu_reset_context(vcpu);
			kvm_mmu_load(vcpu);
		}
	}

	kvm_reput_irq(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

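/*
 * Called on every guest entry: default to no TLB flush and allocate a
 * fresh ASID if this vcpu last ran on a different physical CPU or the
 * per-CPU ASID generation has changed.
 */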
static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->vcpu.cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}


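/*
 * Queue an interrupt through the VMCB's virtual interrupt (V_IRQ)
 * mechanism.  The priority field is hard-wired to 0xf, the highest,
 * rather than being derived from the vector.
 */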
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);

	++svm->vcpu.stat.irq_injections;
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_inject_irq(svm, irq);
}

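/*
 * CR8 (TPR) writes are normally left un-intercepted; the intercept is
 * re-armed only while the guest's TPR is blocking the highest pending
 * interrupt, so that lowering the TPR causes an exit.  Nothing to do
 * for user-space irqchips or when a vapic page is registered.
 */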
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int max_irr, tpr;

	if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
		return;

	vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;

	max_irr = kvm_lapic_find_highest_irr(vcpu);
	if (max_irr == -1)
		return;

	tpr = kvm_lapic_get_cr8(vcpu) << 4;

	if (tpr >= (max_irr & 0xf0))
		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

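/*
 * Runs before each guest entry (in-kernel irqchip): first re-inject an
 * interrupt that was in flight at #VMEXIT (exit_int_info); otherwise,
 * if an interrupt is pending and deliverable, inject it now.  If it
 * cannot be delivered (IF clear, interrupt shadow, or another event
 * already queued) request a VINTR exit so we regain control as soon
 * as the window opens.
 */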
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int intr_vector = -1;

	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
		intr_vector = vmcb->control.exit_int_info &
			      SVM_EVTINJ_VEC_MASK;
		vmcb->control.exit_int_info = 0;
		svm_inject_irq(svm, intr_vector);
		goto out;
	}

	if (vmcb->control.int_ctl & V_IRQ_MASK)
		goto out;

	if (!kvm_cpu_has_interrupt(vcpu))
		goto out;

	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
		/* unable to deliver irq, set pending irq */
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
		goto out;
	}
	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
	intr_vector = kvm_cpu_get_interrupt(vcpu);
	svm_inject_irq(svm, intr_vector);
out:
	update_cr8_intercept(vcpu);
}

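/*
 * A virtual interrupt that was still undelivered at #VMEXIT (user-space
 * irqchip only) is pushed back onto the pending queue; the interrupt
 * window state is then recomputed from the interrupt shadow.
 */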
static void kvm_reput_irq(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

	if ((control->int_ctl & V_IRQ_MASK)
	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(&svm->vcpu, control->int_vector);
	}

	svm->vcpu.arch.interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void svm_do_inject_vector(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	svm_inject_irq(svm, irq);
}

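/*
 * User-space irqchip counterpart of svm_intr_assist(): inject the
 * lowest pending vector when the interrupt window is open, otherwise
 * arm the VINTR intercept so we are notified when it opens.
 */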
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				       struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	svm->vcpu.arch.interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
		/*
		 * Interrupts are enabled and not blocked by an sti or
		 * mov ss shadow, so the pending vector can go in now.
		 */
		svm_do_inject_vector(svm);

	/*
	 * Interrupts are blocked; request notification when they are
	 * unblocked.
	 */
	if (!svm->vcpu.arch.interrupt_window_open &&
	    (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
		svm_set_vintr(svm);
	else
		svm_clear_vintr(svm);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

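/*
 * After #VMEXIT, propagate the V_TPR value the guest may have changed
 * back into the local APIC.  Skipped while CR8 writes are intercepted,
 * since the APIC then already holds the current value.
 */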
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_lapic_set_tpr(vcpu, cr8);
	}
}

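/*
 * Before VMRUN, mirror the in-kernel APIC's TPR into V_TPR so the
 * hardware can evaluate the guest's CR8 without exiting.
 */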
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

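/*
 * The world switch.  Guest RAX/RSP/RIP live in the VMCB; the remaining
 * general-purpose registers are moved by hand in the asm block below,
 * with RAX pointing at the VMCB physical address across
 * VMLOAD/VMRUN/VMSAVE.  Host segment selectors, debug registers and
 * CR2 are saved and restored around the run, and clgi()/stgi() keep
 * the global interrupt flag clear while host IRQs are re-enabled.
 */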
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	save_host_msrs(vcpu);
	fs_selector = kvm_read_fs();
	gs_selector = kvm_read_gs();
	ldt_selector = kvm_read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->arch.cr2;
	/* required for live migration with NPT */
	if (npt_enabled)
		svm->vmcb->save.cr3 = vcpu->arch.cr3;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
		);

	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	kvm_load_fs(fs_selector);
	kvm_load_gs(gs_selector);
	kvm_load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;
}

#undef R

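/*
 * Install a new paging root.  With nested paging it becomes nested_cr3
 * and the guest's own CR3 is left alone; otherwise it is the shadow
 * root and goes into save.cr3.  Either way a new ASID is forced so
 * stale translations are dropped.
 */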
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
		return;
	}

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

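/*
 * 0f 01 d9 encodes VMMCALL; the generic hypercall code uses this to
 * patch the guest's hypercall instruction with the one SVM actually
 * intercepts.
 */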
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static int svm_get_mt_mask_shift(void)
{
	return 0;
}

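/*
 * The kvm_x86_ops instance registered with kvm_init(); it binds the
 * architecture-independent KVM code to the SVM implementations above.
 */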
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
	.queue_exception = svm_queue_exception,
	.exception_injected = svm_exception_injected,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask_shift = svm_get_mt_mask_shift,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)