/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with the hypercall instructions
   provided through the device tree. */
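
/*
 * A sketch of the expected flow (the actual patching lives in the
 * companion C code, arch/powerpc/kernel/kvm.c): when a hypervisor is
 * detected, the instruction sequence it advertises (e.g. via the device
 * tree) is copied over the nops below.  If nothing patches this stub,
 * li r3, -1 makes every hypercall simply return -1, i.e. "unsupported".
 */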

.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr

#define KVM_MAGIC_PAGE	(-4096)
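
/*
 * The magic page is shared between guest and host and sits at the top of
 * the guest's effective address space.  Using r0 as the base register of
 * a load/store makes the hardware substitute a literal 0, so an operand
 * like (KVM_MAGIC_PAGE + offs)(0) addresses the sign-extended constant
 * -4096 + offs, i.e. a field inside that page.
 */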

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
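
/*
 * On 32-bit kernels only the low word of a 64-bit magic page field is
 * accessed; the "+ 4" above selects that word in the big-endian
 * in-memory layout.
 */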

#define SCRATCH_SAVE \
	/* Enable critical section. We are critical if \
	   shared->critical == r1 */ \
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \
	\
	/* Save state */ \
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
	mfcr	r31; \
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE \
	/* Restore state */ \
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \
	mtcr	r30; \
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
	\
	/* Disable critical section. We are critical if \
	   shared->critical == r1 and r2 is always != r1 */ \
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
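
/*
 * While shared->critical holds the guest's current r1, the host treats
 * the guest as being inside one of these templates and defers interrupt
 * injection, so the scratch slots and the clobbered r30/r31/CR are never
 * observed half-restored.  Storing r2, which can never equal r1, ends
 * the critical section.
 */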

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
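
/*
 * Each template exports word offsets (the _offs symbols) and a word
 * count (_len).  Roughly, the patching code in arch/powerpc/kernel/kvm.c
 * copies _len instruction words, rewrites the slot at _reg_offs so the
 * r0 placeholder becomes the register of the trapped instruction, stores
 * that original (trapping) instruction at _orig_ins_offs as the
 * fallback, and retargets the "b ." at _branch_offs back to the
 * instruction following the patch site.
 */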


#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
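
/*
 * Only the safe bits above can change without the host's help; flipping
 * them just rewrites the magic page MSR.  For example, a mtmsr that
 * merely toggles MSR_EE stays in the guest, while one touching
 * translation or privilege bits (MSR_IR, MSR_DR, MSR_PR, ...) must fall
 * through to a real, trapping mtmsr below.
 */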

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we really need to execute mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
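
/*
 * The templates below serve BookE guests: wrtee and wrteei touch only
 * MSR[EE], so the emulation folds the new EE value into the magic page
 * MSR and merely checks whether an interrupt became deliverable.
 */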

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
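
/*
 * "wrteei 0" can only ever clear MSR[EE], so it can never make a pending
 * interrupt deliverable.  The template below therefore needs no register
 * fixup and no slot for the original instruction, which is why it
 * exports fewer offsets than its siblings.
 */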

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
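
/*
 * mtsrin rewrites a segment register on 32-bit Book3S.  As long as
 * address translation is off (MSR[IR/DR] clear), the new value is only
 * cached in the magic page's SR array; with translation on, the
 * original, trapping mtsrin runs instead so the host picks up the
 * change immediately.
 */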

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* Byte offset of the SR entry: (rX >> 28) * 4 */
	rlwinm	r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4