blob: 10b681c092ed8628edd117573b1a942e34fe1eca [file] [log] [blame]
Alexander Graf2a342ed2010-07-29 14:47:48 +02001/*
2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License, version 2, as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#include <linux/kvm_host.h>
22#include <linux/init.h>
23#include <linux/kvm_para.h>
24#include <linux/slab.h>
25#include <linux/of.h>
26
27#include <asm/reg.h>
Alexander Graf2a342ed2010-07-29 14:47:48 +020028#include <asm/sections.h>
29#include <asm/cacheflush.h>
30#include <asm/disassemble.h>
31
Alexander Grafd17051c2010-07-29 14:47:57 +020032#define KVM_MAGIC_PAGE (-4096L)
33#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
34
Alexander Grafd1293c92010-07-29 14:47:59 +020035#define KVM_INST_LWZ 0x80000000
36#define KVM_INST_STW 0x90000000
37#define KVM_INST_LD 0xe8000000
38#define KVM_INST_STD 0xf8000000
39#define KVM_INST_NOP 0x60000000
40#define KVM_INST_B 0x48000000
41#define KVM_INST_B_MASK 0x03ffffff
42#define KVM_INST_B_MAX 0x01ffffff
43
Alexander Graf73a18102010-07-29 14:47:58 +020044#define KVM_MASK_RT 0x03e00000
Alexander Graf512ba592010-08-05 11:26:04 +020045#define KVM_RT_30 0x03c00000
Alexander Grafcbe487f2010-08-03 10:39:35 +020046#define KVM_MASK_RB 0x0000f800
Alexander Grafd1293c92010-07-29 14:47:59 +020047#define KVM_INST_MFMSR 0x7c0000a6
48#define KVM_INST_MFSPR_SPRG0 0x7c1042a6
49#define KVM_INST_MFSPR_SPRG1 0x7c1142a6
50#define KVM_INST_MFSPR_SPRG2 0x7c1242a6
51#define KVM_INST_MFSPR_SPRG3 0x7c1342a6
52#define KVM_INST_MFSPR_SRR0 0x7c1a02a6
53#define KVM_INST_MFSPR_SRR1 0x7c1b02a6
54#define KVM_INST_MFSPR_DAR 0x7c1302a6
55#define KVM_INST_MFSPR_DSISR 0x7c1202a6
56
57#define KVM_INST_MTSPR_SPRG0 0x7c1043a6
58#define KVM_INST_MTSPR_SPRG1 0x7c1143a6
59#define KVM_INST_MTSPR_SPRG2 0x7c1243a6
60#define KVM_INST_MTSPR_SPRG3 0x7c1343a6
61#define KVM_INST_MTSPR_SRR0 0x7c1a03a6
62#define KVM_INST_MTSPR_SRR1 0x7c1b03a6
63#define KVM_INST_MTSPR_DAR 0x7c1303a6
64#define KVM_INST_MTSPR_DSISR 0x7c1203a6
Alexander Graf73a18102010-07-29 14:47:58 +020065
Alexander Grafd1290b152010-07-29 14:48:00 +020066#define KVM_INST_TLBSYNC 0x7c00046c
Alexander Graf78109272010-07-29 14:48:05 +020067#define KVM_INST_MTMSRD_L0 0x7c000164
Alexander Graf819a63d2010-07-29 14:48:04 +020068#define KVM_INST_MTMSRD_L1 0x7c010164
Alexander Graf78109272010-07-29 14:48:05 +020069#define KVM_INST_MTMSR 0x7c000124
Alexander Grafd1290b152010-07-29 14:48:00 +020070
Alexander Graf644bfa02010-07-29 14:48:06 +020071#define KVM_INST_WRTEEI_0 0x7c000146
72#define KVM_INST_WRTEEI_1 0x7c008146
73
Alexander Grafcbe487f2010-08-03 10:39:35 +020074#define KVM_INST_MTSRIN 0x7c0001e4
75
Alexander Graf73a18102010-07-29 14:47:58 +020076static bool kvm_patching_worked = true;
Alexander Graf2d4f5672010-07-29 14:48:01 +020077static char kvm_tmp[1024 * 1024];
78static int kvm_tmp_index;
Alexander Graf73a18102010-07-29 14:47:58 +020079
80static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
81{
82 *inst = new_inst;
83 flush_icache_range((ulong)inst, (ulong)inst + 4);
84}
85
Alexander Graf512ba592010-08-05 11:26:04 +020086static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
87{
88#ifdef CONFIG_64BIT
89 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
90#else
91 kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
92#endif
93}
94
Alexander Grafd1293c92010-07-29 14:47:59 +020095static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
96{
97#ifdef CONFIG_64BIT
98 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
99#else
100 kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
101#endif
102}
103
104static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
105{
106 kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
107}
108
109static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
110{
111#ifdef CONFIG_64BIT
112 kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
113#else
114 kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
115#endif
116}
117
118static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
119{
120 kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
121}
122
/* Replace *inst with a no-op (0x60000000 == ori 0,0,0). */
static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
127
/*
 * Replace *inst with an unconditional relative branch.  'addr' is the
 * byte offset from the instruction being patched; callers guarantee it
 * fits the 26-bit signed branch displacement.
 */
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#ifdef CONFIG_RELOCATABLE
	/* On relocatable kernels interrupts handlers and our code
	   can be in different regions, so we don't patch them */

	extern u32 __end_interrupts;
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
141
Alexander Graf2d4f5672010-07-29 14:48:01 +0200142static u32 *kvm_alloc(int len)
143{
144 u32 *p;
145
146 if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
147 printk(KERN_ERR "KVM: No more space (%d + %d)\n",
148 kvm_tmp_index, len);
149 kvm_patching_worked = false;
150 return NULL;
151 }
152
153 p = (void*)&kvm_tmp[kvm_tmp_index];
154 kvm_tmp_index += len;
155
156 return p;
157}
158
Alexander Graf819a63d2010-07-29 14:48:04 +0200159extern u32 kvm_emulate_mtmsrd_branch_offs;
160extern u32 kvm_emulate_mtmsrd_reg_offs;
161extern u32 kvm_emulate_mtmsrd_len;
162extern u32 kvm_emulate_mtmsrd[];
163
/*
 * Replace an mtmsrd(L=1) instruction with a branch into a trampoline
 * copied from the kvm_emulate_mtmsrd template.  The template's branch
 * slot is fixed up to return to the instruction after the original,
 * and its register slot to use the caller's source register.
 */
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;	/* b from the patch site into the trampoline */
	int distance_end;	/* b from the trampoline back to the caller */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtmsrd_reg_offs] |= rt;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
195
Alexander Graf78109272010-07-29 14:48:05 +0200196extern u32 kvm_emulate_mtmsr_branch_offs;
197extern u32 kvm_emulate_mtmsr_reg1_offs;
198extern u32 kvm_emulate_mtmsr_reg2_offs;
Alexander Graf78109272010-07-29 14:48:05 +0200199extern u32 kvm_emulate_mtmsr_orig_ins_offs;
200extern u32 kvm_emulate_mtmsr_len;
201extern u32 kvm_emulate_mtmsr[];
202
/*
 * Replace an mtmsr / mtmsrd(L=0) instruction with a branch into a
 * trampoline built from the kvm_emulate_mtmsr template.  The branch
 * slot returns to the instruction after the original; the two register
 * slots are rewritten to read the caller's source register.
 */
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;	/* b from the patch site into the trampoline */
	int distance_end;	/* b from the trampoline back to the caller */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		/*
		 * NOTE(review): r30/r31 appear to be used by the trampoline
		 * itself (cf. the "We use r30 and r31 during the hook"
		 * comment at the mtmsrd call site), so their values are
		 * reloaded from the magic page scratch slots into r30
		 * (KVM_RT_30) instead of read from the register directly
		 * — confirm against kvm_emulate_mtmsr in the .S file.
		 */
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	/* Keep a copy of the original instruction inside the trampoline */
	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
255
Alexander Graf644bfa02010-07-29 14:48:06 +0200256#ifdef CONFIG_BOOKE
257
258extern u32 kvm_emulate_wrteei_branch_offs;
259extern u32 kvm_emulate_wrteei_ee_offs;
260extern u32 kvm_emulate_wrteei_len;
261extern u32 kvm_emulate_wrteei[];
262
/*
 * Replace a wrteei instruction (BookE) with a branch into a trampoline
 * built from the kvm_emulate_wrteei template.  The EE bit of the
 * original encoding is carried into the template so the trampoline
 * enables/disables interrupts the same way.
 */
static void kvm_patch_ins_wrteei(u32 *inst)
{
	u32 *p;
	int distance_start;	/* b from the patch site into the trampoline */
	int distance_end;	/* b from the trampoline back to the caller */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
	p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
	/* Copy the EE bit from the original wrteei into the template */
	p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
294
295#endif
296
Alexander Grafcbe487f2010-08-03 10:39:35 +0200297#ifdef CONFIG_PPC_BOOK3S_32
298
299extern u32 kvm_emulate_mtsrin_branch_offs;
300extern u32 kvm_emulate_mtsrin_reg1_offs;
301extern u32 kvm_emulate_mtsrin_reg2_offs;
302extern u32 kvm_emulate_mtsrin_orig_ins_offs;
303extern u32 kvm_emulate_mtsrin_len;
304extern u32 kvm_emulate_mtsrin[];
305
/*
 * Replace an mtsrin instruction (Book3S 32) with a branch into a
 * trampoline built from the kvm_emulate_mtsrin template.
 *
 * rt and rb arrive still positioned in their instruction fields
 * (RT at bit 21, RB at bit 11), pre-masked by the caller.
 */
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;	/* b from the patch site into the trampoline */
	int distance_end;	/* b from the trampoline back to the caller */
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	/* Move rb from the RB field (bit 11) up into the RT field (bit 21) */
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	/* Keep a copy of the original instruction inside the trampoline */
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
339
340#endif
341
Alexander Graf73a18102010-07-29 14:47:58 +0200342static void kvm_map_magic_page(void *data)
343{
Alexander Graf7508e162010-08-03 11:32:56 +0200344 u32 *features = data;
345
346 ulong in[8];
347 ulong out[8];
348
349 in[0] = KVM_MAGIC_PAGE;
350 in[1] = KVM_MAGIC_PAGE;
351
352 kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
353
354 *features = out[0];
Alexander Graf73a18102010-07-29 14:47:58 +0200355}
356
Alexander Graf7508e162010-08-03 11:32:56 +0200357static void kvm_check_ins(u32 *inst, u32 features)
Alexander Graf73a18102010-07-29 14:47:58 +0200358{
359 u32 _inst = *inst;
360 u32 inst_no_rt = _inst & ~KVM_MASK_RT;
361 u32 inst_rt = _inst & KVM_MASK_RT;
362
363 switch (inst_no_rt) {
Alexander Grafd1293c92010-07-29 14:47:59 +0200364 /* Loads */
365 case KVM_INST_MFMSR:
366 kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
367 break;
368 case KVM_INST_MFSPR_SPRG0:
369 kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
370 break;
371 case KVM_INST_MFSPR_SPRG1:
372 kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
373 break;
374 case KVM_INST_MFSPR_SPRG2:
375 kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
376 break;
377 case KVM_INST_MFSPR_SPRG3:
378 kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
379 break;
380 case KVM_INST_MFSPR_SRR0:
381 kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
382 break;
383 case KVM_INST_MFSPR_SRR1:
384 kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
385 break;
386 case KVM_INST_MFSPR_DAR:
387 kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
388 break;
389 case KVM_INST_MFSPR_DSISR:
390 kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
391 break;
392
393 /* Stores */
394 case KVM_INST_MTSPR_SPRG0:
395 kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
396 break;
397 case KVM_INST_MTSPR_SPRG1:
398 kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
399 break;
400 case KVM_INST_MTSPR_SPRG2:
401 kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
402 break;
403 case KVM_INST_MTSPR_SPRG3:
404 kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
405 break;
406 case KVM_INST_MTSPR_SRR0:
407 kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
408 break;
409 case KVM_INST_MTSPR_SRR1:
410 kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
411 break;
412 case KVM_INST_MTSPR_DAR:
413 kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
414 break;
415 case KVM_INST_MTSPR_DSISR:
416 kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
417 break;
Alexander Grafd1290b152010-07-29 14:48:00 +0200418
419 /* Nops */
420 case KVM_INST_TLBSYNC:
421 kvm_patch_ins_nop(inst);
422 break;
Alexander Graf819a63d2010-07-29 14:48:04 +0200423
424 /* Rewrites */
425 case KVM_INST_MTMSRD_L1:
426 /* We use r30 and r31 during the hook */
427 if (get_rt(inst_rt) < 30)
428 kvm_patch_ins_mtmsrd(inst, inst_rt);
429 break;
Alexander Graf78109272010-07-29 14:48:05 +0200430 case KVM_INST_MTMSR:
431 case KVM_INST_MTMSRD_L0:
Alexander Graf512ba592010-08-05 11:26:04 +0200432 kvm_patch_ins_mtmsr(inst, inst_rt);
Alexander Graf78109272010-07-29 14:48:05 +0200433 break;
Alexander Graf73a18102010-07-29 14:47:58 +0200434 }
435
Alexander Grafcbe487f2010-08-03 10:39:35 +0200436 switch (inst_no_rt & ~KVM_MASK_RB) {
437#ifdef CONFIG_PPC_BOOK3S_32
438 case KVM_INST_MTSRIN:
439 if (features & KVM_MAGIC_FEAT_SR) {
440 u32 inst_rb = _inst & KVM_MASK_RB;
441 kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
442 }
443 break;
444 break;
445#endif
446 }
447
Alexander Graf73a18102010-07-29 14:47:58 +0200448 switch (_inst) {
Alexander Graf644bfa02010-07-29 14:48:06 +0200449#ifdef CONFIG_BOOKE
450 case KVM_INST_WRTEEI_0:
451 case KVM_INST_WRTEEI_1:
452 kvm_patch_ins_wrteei(inst);
453 break;
454#endif
Alexander Graf73a18102010-07-29 14:47:58 +0200455 }
456}
457
458static void kvm_use_magic_page(void)
459{
460 u32 *p;
461 u32 *start, *end;
462 u32 tmp;
Alexander Graf7508e162010-08-03 11:32:56 +0200463 u32 features;
Alexander Graf73a18102010-07-29 14:47:58 +0200464
465 /* Tell the host to map the magic page to -4096 on all CPUs */
Alexander Graf7508e162010-08-03 11:32:56 +0200466 on_each_cpu(kvm_map_magic_page, &features, 1);
Alexander Graf73a18102010-07-29 14:47:58 +0200467
468 /* Quick self-test to see if the mapping works */
469 if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
470 kvm_patching_worked = false;
471 return;
472 }
473
474 /* Now loop through all code and find instructions */
475 start = (void*)_stext;
476 end = (void*)_etext;
477
478 for (p = start; p < end; p++)
Alexander Graf7508e162010-08-03 11:32:56 +0200479 kvm_check_ins(p, features);
Alexander Graf73a18102010-07-29 14:47:58 +0200480
481 printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
482 kvm_patching_worked ? "worked" : "failed");
483}
484
/*
 * Issue a hypercall through the patched kvm_hypercall_start sequence.
 *
 * in:  eight argument slots, handed to the hypervisor in r3-r10
 * out: eight result slots, filled from r4-r11 after the call
 * nr:  hypercall number, passed in r11
 *
 * Returns the hypercall status from r3.
 */
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	/* Pin each value to the exact register the calling convention uses */
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	/*
	 * Every argument register is also listed as an output because the
	 * hypercall may clobber it; lr is clobbered by the bl itself.
	 */
	asm volatile("bl kvm_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	/* Copy the result registers back to the caller's buffer */
	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
Alexander Graf73a18102010-07-29 14:47:58 +0200521
522static int kvm_para_setup(void)
523{
524 extern u32 kvm_hypercall_start;
525 struct device_node *hyper_node;
526 u32 *insts;
527 int len, i;
528
529 hyper_node = of_find_node_by_path("/hypervisor");
530 if (!hyper_node)
531 return -1;
532
533 insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
534 if (len % 4)
535 return -1;
536 if (len > (4 * 4))
537 return -1;
538
539 for (i = 0; i < (len / 4); i++)
540 kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
541
542 return 0;
543}
544
/*
 * Return the unused tail of the kvm_tmp trampoline arena to the page
 * allocator once patching is finished.  Only whole pages past the last
 * byte handed out by kvm_alloc() are freed.
 */
static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	/* First page boundary at or above the last allocated byte */
	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	/* Last page boundary within the array */
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
560
Alexander Graf73a18102010-07-29 14:47:58 +0200561static int __init kvm_guest_init(void)
562{
563 if (!kvm_para_available())
Alexander Graf2d4f5672010-07-29 14:48:01 +0200564 goto free_tmp;
Alexander Graf73a18102010-07-29 14:47:58 +0200565
566 if (kvm_para_setup())
Alexander Graf2d4f5672010-07-29 14:48:01 +0200567 goto free_tmp;
Alexander Graf73a18102010-07-29 14:47:58 +0200568
569 if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
570 kvm_use_magic_page();
571
Alexander Graf2d4f5672010-07-29 14:48:01 +0200572free_tmp:
573 kvm_free_tmp();
574
Alexander Graf73a18102010-07-29 14:47:58 +0200575 return 0;
576}
577
578postcore_initcall(kvm_guest_init);