/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/kvm_ppc.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>

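/*
 * The host maps a "magic page" (struct kvm_vcpu_arch_shared) into the last
 * 4 KiB of the guest's effective address space.  magic_var() yields the
 * effective address of one field inside that page.
 */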
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

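/*
 * Opcode templates used to synthesize replacement instructions, plus masks
 * for the fields we extract from the instructions we recognize.
 */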
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff

#define KVM_MASK_RT		0x03e00000
#define KVM_INST_MFMSR		0x7c0000a6
#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
#define KVM_INST_MFSPR_DAR	0x7c1302a6
#define KVM_INST_MFSPR_DSISR	0x7c1202a6

#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
#define KVM_INST_MTSPR_DAR	0x7c1303a6
#define KVM_INST_MTSPR_DSISR	0x7c1203a6

#define KVM_INST_TLBSYNC	0x7c00046c

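/*
 * kvm_patching_worked is cleared as soon as any patching step fails.
 * kvm_tmp is a 1 MiB scratch buffer for code emitted at patch time; the
 * unused tail is handed back to the page allocator by kvm_free_tmp().
 */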
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

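/* Overwrite one instruction in place and flush the icache so it takes effect. */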
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

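/*
 * Rewrite an instruction into a load from / store to 'addr' on the magic
 * page.  64-bit kernels use ld/std; 32-bit (big-endian) kernels access the
 * least significant word of the 64-bit field at addr + 4 with lwz/stw.
 */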
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

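/*
 * Instructions that need no effect inside the guest (currently only tlbsync)
 * are simply replaced with a nop.
 */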
static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#ifdef CONFIG_RELOCATABLE
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	extern u32 __end_interrupts;
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

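/*
 * Carve 'len' bytes out of the kvm_tmp scratch buffer; on exhaustion return
 * NULL and give up on patching altogether.
 */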
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

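/*
 * Ask the host (via the KVM_HC_PPC_MAP_MAGIC_PAGE hypercall) to map the
 * shared magic page at effective address KVM_MAGIC_PAGE; run on every CPU.
 */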
static void kvm_map_magic_page(void *data)
{
	kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
		       KVM_MAGIC_PAGE,	/* Physical Address */
		       KVM_MAGIC_PAGE);	/* Effective Address */
}

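/*
 * Check one kernel instruction: mask out the RT field and compare against the
 * mfmsr/mfspr/mtspr encodings we know how to handle.  Matches are rewritten
 * into direct loads/stores on the magic page (lwz/stw for the 32-bit DSISR,
 * ld/std for the 64-bit fields), so the access no longer traps into the host.
 */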
static void kvm_check_ins(u32 *inst)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG0:
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG1:
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG2:
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG3:
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR0:
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR1:
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MFSPR_DAR:
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR_DSISR:
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR_SPRG0:
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG1:
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG2:
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG3:
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR0:
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR1:
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MTSPR_DAR:
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR_DSISR:
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;
	}

	switch (_inst) {
	}
}

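/*
 * Map the magic page on all CPUs, verify the mapping with a test read, then
 * scan the kernel text and rewrite every instruction kvm_check_ins() knows.
 */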
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, NULL, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	for (p = start; p < end; p++)
		kvm_check_ins(p);

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

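/*
 * Generic hypercall wrapper: eight inputs go in r3-r10 and the hypercall
 * number in r11; the return code comes back in r3 and up to eight outputs in
 * r4-r11.  The actual trap sequence lives at kvm_hypercall_start and is
 * filled in by kvm_para_setup().
 */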
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl kvm_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

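/*
 * Read the hypercall instruction sequence from the "hcall-instructions"
 * property of the /hypervisor device tree node and patch it (at most four
 * instructions) over the stub at kvm_hypercall_start.
 */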
static int kvm_para_setup(void)
{
	extern u32 kvm_hypercall_start;
	struct device_node *hyper_node;
	u32 *insts;
	int len, i;

	hyper_node = of_find_node_by_path("/hypervisor");
	if (!hyper_node)
		return -1;

	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
	if (len % 4)
		return -1;
	if (len > (4 * 4))
		return -1;

	for (i = 0; i < (len / 4); i++)
		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

	return 0;
}

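/*
 * Return the unused, page-aligned tail of the kvm_tmp scratch buffer to the
 * page allocator once patching has finished.
 */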
static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}

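/*
 * Guest-side setup: detect a KVM host, install the hypercall sequence and,
 * if the host offers the magic page, patch privileged instruction accesses;
 * the unused scratch space is freed in every case.
 */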
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (kvm_para_setup())
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);