/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
#include <asm/setup.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom, func) \
        (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom, func, arg) \
        (((VROMLONGFUNC *)(rom->func))(arg))

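/*
 * ROM header located by probe_vmi_rom()/check_vmi_rom(), plus the feature
 * disable flags driven by the "vmi=" early parameter parsed at the bottom
 * of this file.
 */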
static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;

/* Cached VMI operations */
static struct {
        void (*cpuid)(void /* non-c */);
        void (*_set_ldt)(u32 selector);
        void (*set_tr)(u32 selector);
        void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
        void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
        void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
        void (*set_kernel_stack)(u32 selector, u32 sp0);
        void (*allocate_page)(u32, u32, u32, u32, u32);
        void (*release_page)(u32, u32);
        void (*set_pte)(pte_t, pte_t *, unsigned);
        void (*update_pte)(pte_t *, unsigned);
        void (*set_linear_mapping)(int, void *, u32, u32);
        void (*_flush_tlb)(int);
        void (*set_initial_ap_state)(int, int);
        void (*halt)(void);
        void (*set_lazy_mode)(int mode);
} vmi_ops;

/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;

/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

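/*
 * Fill in the 32-bit displacement of a 5-byte call/jmp at insnbuf so that
 * it transfers control from ip to dest.
 */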
static inline void patch_offset(void *insnbuf,
                                unsigned long ip, unsigned long dest)
{
        *(unsigned long *)(insnbuf + 1) = dest - ip - 5;
}

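/*
 * Ask the ROM how the given VMI call should be bound and rewrite the
 * instruction buffer accordingly: emit a direct call, a direct jump,
 * nothing at all (nop), or leave the native code in place.
 */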
static unsigned patch_internal(int call, unsigned len, void *insnbuf,
                               unsigned long ip)
{
        u64 reloc;
        struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
        reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
        switch (rel->type) {
        case VMI_RELOCATION_CALL_REL:
                BUG_ON(len < 5);
                *(char *)insnbuf = MNEM_CALL;
                patch_offset(insnbuf, ip, (unsigned long)rel->eip);
                return 5;

        case VMI_RELOCATION_JUMP_REL:
                BUG_ON(len < 5);
                *(char *)insnbuf = MNEM_JMP;
                patch_offset(insnbuf, ip, (unsigned long)rel->eip);
                return 5;

        case VMI_RELOCATION_NOP:
                /* obliterate the whole thing */
                return 0;

        case VMI_RELOCATION_NONE:
                /* leave native code in place */
                break;

        default:
                BUG();
        }
        return len;
}

/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
                          unsigned long ip, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
                return patch_internal(VMI_CALL_DisableInterrupts, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
                return patch_internal(VMI_CALL_EnableInterrupts, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
                return patch_internal(VMI_CALL_SetInterruptMask, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_irq_ops.save_fl):
                return patch_internal(VMI_CALL_GetInterruptMask, len,
                                      insns, ip);
        case PARAVIRT_PATCH(pv_cpu_ops.iret):
                return patch_internal(VMI_CALL_IRET, len, insns, ip);
        case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
                return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
        default:
                break;
        }
        return len;
}

/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
                      unsigned int *cx, unsigned int *dx)
{
        int override = 0;
        if (*ax == 1)
                override = 1;
        asm volatile ("call *%6"
                      : "=a" (*ax),
                        "=b" (*bx),
                        "=c" (*cx),
                        "=d" (*dx)
                      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
        if (override) {
                if (disable_pse)
                        *dx &= ~X86_FEATURE_PSE;
                if (disable_pge)
                        *dx &= ~X86_FEATURE_PGE;
                if (disable_sep)
                        *dx &= ~X86_FEATURE_SEP;
                if (disable_tsc)
                        *dx &= ~X86_FEATURE_TSC;
                if (disable_mtrr)
                        *dx &= ~X86_FEATURE_MTRR;
        }
}

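/*
 * Only push a TLS descriptor into the GDT if it actually changed, since
 * write_gdt_entry() goes through the paravirt layer.
 */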
static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
        if (gdt[nr].a != new->a || gdt[nr].b != new->b)
                write_gdt_entry(gdt, nr, new, 0);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);
        vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
        vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
        vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}

static void vmi_set_ldt(const void *addr, unsigned entries)
{
        unsigned cpu = smp_processor_id();
        struct desc_struct desc;

        pack_descriptor(&desc, (unsigned long)addr,
                        entries * sizeof(struct desc_struct) - 1,
                        DESC_LDT, 0);
        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
        vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

static void vmi_set_tr(void)
{
        vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

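/*
 * The cached VMI calls take descriptors as two 32-bit words, so these
 * wrappers split the native descriptor structures before handing them down.
 */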
static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        u32 *idt_entry = (u32 *)g;
        vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
}

static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
                                const void *desc, int type)
{
        u32 *gdt_entry = (u32 *)desc;
        vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
}

static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
                                const void *desc)
{
        u32 *ldt_entry = (u32 *)desc;
        vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}

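/* Update the kernel stack in the TSS and tell the hypervisor about it. */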
static void vmi_load_sp0(struct tss_struct *tss,
                         struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;

        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
        vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
}

static void vmi_flush_tlb_user(void)
{
        vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
        vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

#ifdef CONFIG_HIGHPTE
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
        void *va = kmap_atomic(page, type);

        /*
         * We disable highmem allocations for page tables so we should never
         * see any calls to kmap_atomic_pte on a highmem page.
         */
        BUG_ON(PageHighmem(page));

        return va;
}
#endif

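/*
 * Page table allocation/release hooks: notify the hypervisor when a pfn
 * starts or stops being used as an L1 (pte) or L2 (pmd) page table.
 */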
static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
        vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
{
        /*
         * This call comes in very early, before mem_map is setup.
         * It is called only for swapper_pg_dir, which already has
         * data on it.
         */
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
{
        vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pte(unsigned long pfn)
{
        vmi_ops.release_page(pfn, VMI_PAGE_L1);
}

static void vmi_release_pmd(unsigned long pfn)
{
        vmi_ops.release_page(pfn, VMI_PAGE_L2);
}

/*
 * We use the pgd_free hook for releasing the pgd page:
 */
static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;

        vmi_ops.release_page(pfn, VMI_PAGE_L2);
}

/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))

static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
        /* XXX because of set_pmd_pte, this can be called on PT or PD layers */
        vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
        vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
        const pte_t pte = { .pte = pmdval.pmd };
#else
        const pte_t pte = { pmdval.pud.pgd.pgd };
#endif
        vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}

#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        /*
         * XXX This is called from set_pmd_pte, but at both PT
         * and PD layers so the VMI_PAGE_PT flag is wrong.  But
         * it is only called for large page mapping changes,
         * the Xen backend doesn't support large pages, and the
         * ESX backend doesn't depend on the flag.
         */
        set_64bit((unsigned long long *)ptep, pte_val(pteval));
        vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
        /* Um, eww */
        const pte_t pte = { .pte = pudval.pgd.pgd };
        vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        const pte_t pte = { .pte = 0 };
        vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
        const pte_t pte = { .pte = 0 };
        vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif

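/*
 * Instead of the native startup-IPI trampoline, hand the hypervisor a
 * complete initial register and descriptor state for the secondary CPU.
 */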
#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                     unsigned long start_esp)
{
        struct vmi_ap_state ap;

        /* Default everything to zero.  This is fine for most GPRs. */
        memset(&ap, 0, sizeof(struct vmi_ap_state));

        ap.gdtr_limit = GDT_SIZE - 1;
        ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

        ap.idtr_limit = IDT_ENTRIES * 8 - 1;
        ap.idtr_base = (unsigned long) idt_table;

        ap.ldtr = 0;

        ap.cs = __KERNEL_CS;
        ap.eip = (unsigned long) start_eip;
        ap.ss = __KERNEL_DS;
        ap.esp = (unsigned long) start_esp;

        ap.ds = __USER_DS;
        ap.es = __USER_DS;
        ap.fs = __KERNEL_PERCPU;
        ap.gs = __KERNEL_STACK_CANARY;

        ap.eflags = 0;

#ifdef CONFIG_X86_PAE
        /* efer should match BSP efer. */
        if (cpu_has_nx) {
                unsigned l, h;
                rdmsr(MSR_EFER, l, h);
                ap.efer = (unsigned long long) h << 32 | l;
        }
#endif

        ap.cr3 = __pa(swapper_pg_dir);
        /* Protected mode, paging, AM, WP, NE, MP. */
        ap.cr0 = 0x80050023;
        ap.cr4 = mmu_cr4_features;
        vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif

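/*
 * Lazy-mode plumbing.  The magic numbers passed to the ROM appear to mirror
 * the paravirt lazy modes: 0 = off, 1 = batch MMU updates, 2 = batch CPU
 * state updates across a context switch.
 */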
static void vmi_start_context_switch(struct task_struct *prev)
{
        paravirt_start_context_switch(prev);
        vmi_ops.set_lazy_mode(2);
}

static void vmi_end_context_switch(struct task_struct *next)
{
        vmi_ops.set_lazy_mode(0);
        paravirt_end_context_switch(next);
}

static void vmi_enter_lazy_mmu(void)
{
        paravirt_enter_lazy_mmu();
        vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy_mmu(void)
{
        vmi_ops.set_lazy_mode(0);
        paravirt_leave_lazy_mmu();
}

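/*
 * Validate a candidate option ROM: check the ROM and VMI signatures, the
 * API version, the PCI vendor/device IDs and the license string.  Returns
 * 1 if the ROM is usable.
 */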
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
        struct pci_header *pci;
        struct pnp_header *pnp;
        const char *manufacturer = "UNKNOWN";
        const char *product = "UNKNOWN";
        const char *license = "unspecified";

        if (rom->rom_signature != 0xaa55)
                return 0;
        if (rom->vrom_signature != VMI_SIGNATURE)
                return 0;
        if (rom->api_version_maj != VMI_API_REV_MAJOR ||
            rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
                printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
                       rom->api_version_maj,
                       rom->api_version_min);
                return 0;
        }

        /*
         * Relying on the VMI_SIGNATURE field is not 100% safe, so check
         * the PCI header and device type to make sure this is really a
         * VMI device.
         */
        if (!rom->pci_header_offs) {
                printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
                return 0;
        }

        pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
        if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
            pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
                /* Allow it to run... anyways, but warn */
                printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
        }

        if (rom->pnp_header_offs) {
                pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
                if (pnp->manufacturer_offset)
                        manufacturer = (const char *)rom+pnp->manufacturer_offset;
                if (pnp->product_offset)
                        product = (const char *)rom+pnp->product_offset;
        }

        if (rom->license_offs)
                license = (char *)rom+rom->license_offs;

        printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
               manufacturer, product,
               rom->api_version_maj, rom->api_version_min,
               pci->rom_version_maj, pci->rom_version_min);

        /*
         * Don't allow BSD/MIT here for now because we don't want to end up
         * with any binary only shim layers.
         */
        if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
                printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
                       license);
                return 0;
        }

        return 1;
}

/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
        unsigned long base;

        /* VMI ROM is in option ROM area, check signature */
        for (base = 0xC0000; base < 0xE0000; base += 2048) {
                struct vrom_header *romstart;
                romstart = (struct vrom_header *)isa_bus_to_virt(base);
                if (check_vmi_rom(romstart)) {
                        vmi_rom = romstart;
                        return 1;
                }
        }
        return 0;
}

/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
        /* We must establish the lowmem mapping for MMU ops to work */
        if (vmi_ops.set_linear_mapping)
                vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}

/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
        u64 reloc;
        const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
        reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
        BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
        if (rel->type == VMI_RELOCATION_CALL_REL)
                return (void *)rel->eip;
        else
                return NULL;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)                                      \
do {                                                                    \
        reloc = call_vrom_long_func(vmi_rom, get_reloc,                 \
                                    VMI_CALL_##vmicall);                \
        if (rel->type == VMI_RELOCATION_CALL_REL)                       \
                opname = (void *)rel->eip;                              \
        else if (rel->type == VMI_RELOCATION_NOP)                       \
                opname = (void *)vmi_nop;                               \
        else if (rel->type != VMI_RELOCATION_NONE)                      \
                printk(KERN_WARNING "VMI: Unknown relocation "          \
                                    "type %d for " #vmicall"\n",        \
                       rel->type);                                      \
} while (0)

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)                      \
do {                                                                    \
        reloc = call_vrom_long_func(vmi_rom, get_reloc,                 \
                                    VMI_CALL_##vmicall);                \
        BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);                   \
        if (rel->type == VMI_RELOCATION_CALL_REL) {                     \
                opname = wrapper;                                       \
                vmi_ops.cache = (void *)rel->eip;                       \
        }                                                               \
} while (0)

/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
        short kernel_cs;
        u64 reloc;
        const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

        /*
         * Prevent page tables from being allocated in highmem, even if
         * CONFIG_HIGHPTE is enabled.
         */
        __userpte_alloc_gfp &= ~__GFP_HIGHMEM;

        if (call_vrom_func(vmi_rom, vmi_init) != 0) {
                printk(KERN_ERR "VMI ROM failed to initialize!");
                return 0;
        }
        savesegment(cs, kernel_cs);

        pv_info.paravirt_enabled = 1;
        pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
        pv_info.name = "vmi [deprecated]";

        pv_init_ops.patch = vmi_patch;

        /*
         * Many of these operations are ABI compatible with VMI.
         * This means we can fill in the paravirt-ops with direct
         * pointers into the VMI ROM.  If the calling convention for
         * these operations changes, this code needs to be updated.
         *
         * Exceptions
         *  CPUID paravirt-op uses pointers, not the native ISA
         *  halt has no VMI equivalent; all VMI halts are "safe"
         *  no MSR support yet - just trap and emulate.  VMI uses the
         *    same ABI as the native ISA, but Linux wants exceptions
         *    from bogus MSR read / write handled
         *  rdpmc is not yet used in Linux
         */

        /* CPUID is special, so very special it gets wrapped like a present */
        para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);

        para_fill(pv_cpu_ops.clts, CLTS);
        para_fill(pv_cpu_ops.get_debugreg, GetDR);
        para_fill(pv_cpu_ops.set_debugreg, SetDR);
        para_fill(pv_cpu_ops.read_cr0, GetCR0);
        para_fill(pv_mmu_ops.read_cr2, GetCR2);
        para_fill(pv_mmu_ops.read_cr3, GetCR3);
        para_fill(pv_cpu_ops.read_cr4, GetCR4);
        para_fill(pv_cpu_ops.write_cr0, SetCR0);
        para_fill(pv_mmu_ops.write_cr2, SetCR2);
        para_fill(pv_mmu_ops.write_cr3, SetCR3);
        para_fill(pv_cpu_ops.write_cr4, SetCR4);

        para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
        para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
        para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
        para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);

        para_fill(pv_cpu_ops.wbinvd, WBINVD);
        para_fill(pv_cpu_ops.read_tsc, RDTSC);

        /* The following we emulate with trap and emulate for now */
        /* paravirt_ops.read_msr = vmi_rdmsr */
        /* paravirt_ops.write_msr = vmi_wrmsr */
        /* paravirt_ops.rdpmc = vmi_rdpmc */

        /* TR interface doesn't pass TR value, wrap */
        para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

        /* LDT is special, too */
        para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

        para_fill(pv_cpu_ops.load_gdt, SetGDT);
        para_fill(pv_cpu_ops.load_idt, SetIDT);
        para_fill(pv_cpu_ops.store_gdt, GetGDT);
        para_fill(pv_cpu_ops.store_idt, GetIDT);
        para_fill(pv_cpu_ops.store_tr, GetTR);
        pv_cpu_ops.load_tls = vmi_load_tls;
        para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
                  write_ldt_entry, WriteLDTEntry);
        para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
                  write_gdt_entry, WriteGDTEntry);
        para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
                  write_idt_entry, WriteIDTEntry);
        para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
        para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
        para_fill(pv_cpu_ops.io_delay, IODelay);

        para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
                  set_lazy_mode, SetLazyMode);
        para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
                  set_lazy_mode, SetLazyMode);

        para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
                  set_lazy_mode, SetLazyMode);
        para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
                  set_lazy_mode, SetLazyMode);

        /* user and kernel flush are just handled with different flags to FlushTLB */
        para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
        para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
        para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);

        /*
         * Until a standard flag format can be agreed on, we need to
         * implement these as wrappers in Linux.  Get the VMI ROM
         * function pointers for the two backend calls.
         */
#ifdef CONFIG_X86_PAE
        vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
        vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
        vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
        vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif

        if (vmi_ops.set_pte) {
                pv_mmu_ops.set_pte = vmi_set_pte;
                pv_mmu_ops.set_pte_at = vmi_set_pte_at;
                pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
                pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
                pv_mmu_ops.set_pud = vmi_set_pud;
                pv_mmu_ops.pte_clear = vmi_pte_clear;
                pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
        }

        if (vmi_ops.update_pte) {
                pv_mmu_ops.pte_update = vmi_update_pte;
                pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
        }

        vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
        if (vmi_ops.allocate_page) {
                pv_mmu_ops.alloc_pte = vmi_allocate_pte;
                pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
                pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
        }

        vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
        if (vmi_ops.release_page) {
                pv_mmu_ops.release_pte = vmi_release_pte;
                pv_mmu_ops.release_pmd = vmi_release_pmd;
                pv_mmu_ops.pgd_free = vmi_pgd_free;
        }

        /* Set linear is needed in all cases */
        vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
        if (vmi_ops.set_linear_mapping)
                pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif

        /*
         * These MUST always be patched.  Don't support indirect jumps
         * through these operations, as the VMI interface may use either
         * a jump or a call to get to these operations, depending on
         * the backend.  They are performance critical anyway, so requiring
         * a patch is not a big problem.
         */
        pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
        pv_cpu_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
        para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        para_fill(apic->read, APICRead);
        para_fill(apic->write, APICWrite);
#endif

        /*
         * Check for VMI timer functionality by probing for a cycle frequency method
         */
        reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
        if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
                vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
                vmi_timer_ops.get_cycle_counter =
                        vmi_get_function(VMI_CALL_GetCycleCounter);
                vmi_timer_ops.get_wallclock =
                        vmi_get_function(VMI_CALL_GetWallclockTime);
                vmi_timer_ops.wallclock_updated =
                        vmi_get_function(VMI_CALL_WallclockUpdated);
                vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
                vmi_timer_ops.cancel_alarm =
                        vmi_get_function(VMI_CALL_CancelAlarm);
                x86_init.timers.timer_init = vmi_time_init;
#ifdef CONFIG_X86_LOCAL_APIC
                x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
                x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
#endif
                pv_time_ops.sched_clock = vmi_sched_clock;
                x86_platform.calibrate_tsc = vmi_tsc_khz;
                x86_platform.get_wallclock = vmi_get_wallclock;
                x86_platform.set_wallclock = vmi_set_wallclock;

                /* We have true wallclock functions; disable CMOS clock sync */
                no_sync_cmos_clock = 1;
        } else {
                disable_noidle = 1;
                disable_vmi_timer = 1;
        }

        para_fill(pv_irq_ops.safe_halt, Halt);

        /*
         * Alternative instruction rewriting doesn't happen soon enough
         * to convert VMI_IRET to a call instead of a jump; so we have
         * to do this before IRQs get reenabled.  Fortunately, it is
         * idempotent.
         */
        apply_paravirt(__parainstructions, __parainstructions_end);

        vmi_bringup();

        return 1;
}

#undef para_fill

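/*
 * Early setup entry point: find and validate the ROM and reserve its
 * virtual address range.  The actual switch to paravirt operations
 * happens later, in vmi_activate().
 */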
void __init vmi_init(void)
{
        if (!vmi_rom)
                probe_vmi_rom();
        else
                check_vmi_rom(vmi_rom);

        /* In case probing for or validating the ROM failed, bail */
        if (!vmi_rom)
                return;

        reserve_top_address(-vmi_rom->virtual_top);

#ifdef CONFIG_X86_IO_APIC
        /* This is virtual hardware; timer routing is wired correctly */
        no_timer_check = 1;
#endif
}

void __init vmi_activate(void)
{
        unsigned long flags;

        if (!vmi_rom)
                return;

        local_irq_save(flags);
        activate_vmi();
        local_irq_restore(flags & X86_EFLAGS_IF);
}

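/*
 * "vmi=" early parameter: each option clears the feature in boot_cpu_data
 * and masks it in vmi_cpuid(), or disables the VMI timer / noidle handling.
 */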
static int __init parse_vmi(char *arg)
{
        if (!arg)
                return -EINVAL;

        if (!strcmp(arg, "disable_pge")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
                disable_pge = 1;
        } else if (!strcmp(arg, "disable_pse")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
                disable_pse = 1;
        } else if (!strcmp(arg, "disable_sep")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
                disable_sep = 1;
        } else if (!strcmp(arg, "disable_tsc")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
                disable_tsc = 1;
        } else if (!strcmp(arg, "disable_mtrr")) {
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
                disable_mtrr = 1;
        } else if (!strcmp(arg, "disable_timer")) {
                disable_vmi_timer = 1;
                disable_noidle = 1;
        } else if (!strcmp(arg, "disable_noidle"))
                disable_noidle = 1;
        return 0;
}

early_param("vmi", parse_vmi);