/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */

.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr

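/*
 * Until that patching happens, the stub above simply returns -1 in r3,
 * i.e. "hypercall not available". The guest overwrites the nops with
 * the hypercall sequence the hypervisor advertises via the device tree.
 */
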
#define KVM_MAGIC_PAGE		(-4096)

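/*
 * The magic page is shared with the hypervisor and mapped at the very
 * top of the effective address space, hence the -4096. Because a base
 * register of 0 in a load/store means "literal zero", the
 * (KVM_MAGIC_PAGE + offset)(0) accesses below reach the page through a
 * sign-extended displacement, without needing a spare register.
 */
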
#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
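
/*
 * LL64/STL64 access a 64-bit field in the magic page. On 32-bit
 * kernels only the low word matters, which on big-endian lives at
 * offset + 4.
 */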

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

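/*
 * While shared->critical equals the guest's r1, the hypervisor defers
 * interrupt injection, so the scratch save/restore above cannot be
 * torn apart by an interrupt that would clobber the scratch slots.
 *
 * kvm_emulate_mtmsrd below stands in for "mtmsrd rX, 1", which only
 * updates MSR_EE and MSR_RI: it folds those two bits into the magic
 * page copy of the MSR and only exits to the hypervisor if an
 * interrupt is pending and the new MSR has EE set.
 */
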
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
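
/*
 * The *_offs and *_len symbols export instruction-word offsets into
 * each template. The patching code (see arch/powerpc/kernel/kvm.c)
 * copies a template next to the patched instruction, rewrites the
 * instruction at *_reg_offs to use the guest's actual source register,
 * drops the original instruction in at *_orig_ins_offs where one
 * exists, and turns the "b ." placeholder at *_branch_offs into a
 * branch back to the instruction following the patch site.
 */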


#define MSR_SAFE_BITS	(MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS	~MSR_SAFE_BITS

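/*
 * For mtmsr, only the "safe" bits above may change without leaving the
 * guest: they are mirrored in the magic page and evaluated lazily. If
 * any other (critical) bit changes, the write goes through a real,
 * trapping mtmsr so the hypervisor sees it immediately.
 */
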
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4


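/*
 * wrteei (BookE) only writes MSR[EE], which is never a critical bit,
 * so the new value can always be folded into the magic page copy of
 * the MSR. The ori at kvm_emulate_wrteei_ee is patched to carry either
 * 0 (wrteei 0) or MSR_EE (wrteei 1) in its immediate field.
 */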
.global kvm_emulate_wrteei
kvm_emulate_wrteei:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	li	r30, 0
	ori	r30, r30, MSR_EE
	andc	r31, r31, r30

	/* OR new MSR_EE onto the old MSR */
kvm_emulate_wrteei_ee:
	ori	r31, r31, 0

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_branch:
	b	.
kvm_emulate_wrteei_end:

.global kvm_emulate_wrteei_branch_offs
kvm_emulate_wrteei_branch_offs:
	.long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_ee_offs
kvm_emulate_wrteei_ee_offs:
	.long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_len
kvm_emulate_wrteei_len:
	.long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4


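/*
 * mtsrin updates a segment register. In real mode (IR/DR off) it is
 * enough to record the new value in the magic page's SR array for the
 * hypervisor to pick up lazily; with translation enabled the update
 * must take effect at once, so we fall back to the original, trapping
 * mtsrin that gets patched in at kvm_emulate_mtsrin_orig_ins.
 */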
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* (rX >> 26) & ~3, i.e. the SR number times 4 */
	rlwinm	r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4