/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/kvm_ppc.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>

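/*
 * The host maps one page of struct kvm_vcpu_arch_shared at the very top of
 * the guest effective address space (-4096).  magic_var() yields the
 * effective address of a field inside that shared page, so privileged
 * register state can be read and written with plain loads and stores.
 */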
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

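/*
 * Instruction templates: PowerPC opcodes with their register/operand fields
 * cleared.  They are used both to recognize instructions to patch (matching
 * on the instruction with its RT field masked out) and to build the
 * replacements (OR'ing the original RT bits back in).
 */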
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff

#define KVM_MASK_RT		0x03e00000
#define KVM_INST_MFMSR		0x7c0000a6
#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
#define KVM_INST_MFSPR_DAR	0x7c1302a6
#define KVM_INST_MFSPR_DSISR	0x7c1202a6

#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
#define KVM_INST_MTSPR_DAR	0x7c1303a6
#define KVM_INST_MTSPR_DSISR	0x7c1203a6

#define KVM_INST_TLBSYNC	0x7c00046c

static bool kvm_patching_worked = true;

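/*
 * Replace one instruction word in place and flush the icache for it, so the
 * CPU fetches the patched instruction instead of a stale copy.
 */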
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

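/*
 * Helpers that build a load or store of a magic page field.  The patched
 * instruction uses RA=0, so the sign-extended 16-bit displacement by itself
 * addresses the page at the very top of the effective address space.  On
 * 32-bit kernels only the low word of the 64-bit (big-endian) shared fields
 * is accessed, hence the "addr + 4" in the 32-bit fallbacks of
 * kvm_patch_ins_ld() and kvm_patch_ins_std().
 */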
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

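/*
 * Some privileged instructions need no replacement at all in a guest; they
 * are simply overwritten with a nop (ori 0,0,0) so they no longer have to
 * trap into the hypervisor.
 */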
static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

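/*
 * Ask the host to map the magic page.  Both the physical and the effective
 * address passed to the hypercall are -4096, i.e. the topmost page.
 */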
static void kvm_map_magic_page(void *data)
{
	kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
		       KVM_MAGIC_PAGE,  /* Physical Address */
		       KVM_MAGIC_PAGE); /* Effective Address */
}

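/*
 * Examine one instruction word.  If it is a privileged access that the magic
 * page can satisfy (mfmsr, or mfspr/mtspr on SPRG0-3, SRR0/1, DAR, DSISR),
 * rewrite it into a plain load or store of the corresponding shared-page
 * field; instructions that become unnecessary (tlbsync) turn into nops.
 */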
static void kvm_check_ins(u32 *inst)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG0:
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG1:
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG2:
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG3:
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR0:
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR1:
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MFSPR_DAR:
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR_DSISR:
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR_SPRG0:
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG1:
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG2:
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG3:
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR0:
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR1:
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MTSPR_DAR:
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR_DSISR:
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;
	}

	switch (_inst) {
	}
}

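/*
 * Map the magic page on every CPU, verify that it is readable, then walk the
 * kernel text from _stext to _etext and patch each candidate instruction.
 */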
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, NULL, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32 *)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void *)_stext;
	end = (void *)_etext;

	for (p = start; p < end; p++)
		kvm_check_ins(p);

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
	       kvm_patching_worked ? "worked" : "failed");
}

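/*
 * Generic hypercall entry point.  Arguments are passed in r3-r10 and the
 * hypercall number in r11; on return r3 holds the status and r4-r11 carry
 * up to eight output values.  The actual hypercall instruction sequence
 * lives at kvm_hypercall_start and is filled in by kvm_para_setup().
 */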
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl	kvm_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

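/*
 * Copy the hypercall instruction sequence that the host advertises in the
 * device tree ("hcall-instructions" under /hypervisor, at most four
 * instructions) over the kvm_hypercall_start stub.
 */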
static int kvm_para_setup(void)
{
	extern u32 kvm_hypercall_start;
	struct device_node *hyper_node;
	u32 *insts;
	int len, i;

	hyper_node = of_find_node_by_path("/hypervisor");
	if (!hyper_node)
		return -1;

	insts = (u32 *)of_get_property(hyper_node, "hcall-instructions", &len);
	if (!insts)	/* property missing; len would be uninitialized */
		return -1;
	if (len % 4)
		return -1;
	if (len > (4 * 4))
		return -1;

	for (i = 0; i < (len / 4); i++)
		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

	return 0;
}

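/*
 * Guest-side initialization: bail out unless KVM paravirtualization is
 * available, install the hypercall sequence, and enable magic page patching
 * when the host advertises KVM_FEATURE_MAGIC_PAGE.
 */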
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return 0;

	if (kvm_para_setup())
		return 0;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

	return 0;
}

postcore_initcall(kvm_guest_init);