/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define VCPU_GPR(n)     (VCPU_GPRS + (n * 4))
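/* A worked expansion, relying on ppc_asm.h defining each rN alias as the
 * bare number N (so the arithmetic below is on plain integers):
 *   VCPU_GPR(r5) -> (VCPU_GPRS + (5 * 4))
 * i.e. each guest GPR occupies a 4-byte slot in the vcpu struct. */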

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */
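/* The frame built by __kvmppc_vcpu_run below then looks like this
 * (byte offsets from r1, low to high):
 *   0   back chain       (HOST_R1, written by stwu)
 *   4   HOST_CALLEE_LR
 *   8   HOST_RUN         (kvm_run pointer)
 *   12  HOST_R2
 *   16  HOST_CR
 *   20  HOST_NV_GPRS     (r14..r31, 18 words)
 * rounded up to a 16-byte multiple by HOST_STACK_SIZE; the caller's LR
 * save word sits just above the frame, at HOST_STACK_SIZE + 4. */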

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS))
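/* kvmppc_resume_host computes (1 << exit_nr) and tests it against these
 * masks, so the extra state (last instruction, DEAR, ESR) is captured
 * only for the exit types whose emulation actually needs it. */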

.macro KVM_HANDLER ivor_nr
_GLOBAL(kvmppc_handler_\ivor_nr)
	/* Get pointer to vcpu and record exit number. */
	mtspr	SPRN_SPRG_WSCRATCH0, r4
	mfspr	r4, SPRN_SPRG_RVCPU
	stw	r5, VCPU_GPR(r5)(r4)
	stw	r6, VCPU_GPR(r6)(r4)
	mfctr	r5
	lis	r6, kvmppc_resume_host@h
	stw	r5, VCPU_CTR(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm
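/* Each expansion of KVM_HANDLER is a short trampoline: stash guest r4 in
 * a scratch SPRG, recover the vcpu pointer, free up r5/r6 and CTR by
 * saving them into the vcpu, record the exit number in r5, then branch
 * to the common kvmppc_resume_host path via CTR. */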

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER BOOKE_INTERRUPT_FIT
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_DEBUG
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND

_GLOBAL(kvmppc_handler_len)
	.long kvmppc_handler_1 - kvmppc_handler_0
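/* Since BOOKE_INTERRUPT_* are preprocessor constants, the expansions
 * above emit symbols kvmppc_handler_0, kvmppc_handler_1, and so on.
 * Every expansion is the same size, so the difference taken here is the
 * common handler length -- presumably used by the C-side setup code to
 * locate handler i at kvmppc_handlers_start + i * kvmppc_handler_len
 * when copying the trampolines into the host IVOR layout. */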


/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
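/* Note: guest r5, r6 and CTR were already saved by KVM_HANDLER; all
 * other registers still hold guest values on entry here. */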
_GLOBAL(kvmppc_resume_host)
	stw	r3, VCPU_GPR(r3)(r4)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(r7)(r4)
	stw	r8, VCPU_GPR(r8)(r4)
	stw	r9, VCPU_GPR(r9)(r4)

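	/* r6 = 1 << exit_nr, for testing against the NEED_* masks above. */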
	li	r6, 1
	slw	r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
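	/* Standard 64-bit timebase read on 32-bit hardware: read TBU,
	 * then TBL, then TBU again, and retry if the upper half ticked
	 * over in between, so both halves belong to the same instant. */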
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
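	/* The faulting instruction must be fetched from the guest's
	 * address space. Temporarily set MSR[DS] so the data load below
	 * is translated in the same translation space the guest runs in,
	 * with isync after each MSR write to enforce the context change. */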
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(r0)(r4)
	stw	r1, VCPU_GPR(r1)(r4)
	stw	r2, VCPU_GPR(r2)(r4)
	stw	r10, VCPU_GPR(r10)(r4)
	stw	r11, VCPU_GPR(r11)(r4)
	stw	r12, VCPU_GPR(r12)(r4)
	stw	r13, VCPU_GPR(r13)(r4)
	stw	r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)
	mfspr	r3, SPRN_SPRG_RSCRATCH0
	stw	r3, VCPU_GPR(r4)(r4)
	mfspr	r3, SPRN_SRR0
	stw	r3, VCPU_PC(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	/* We cheat and know that Linux doesn't use PID1, which is always 0. */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	lwz	r3, HOST_RUN(r1)
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(r14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

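	/* The return code from kvmppc_handle_exit() appears to pack the
	 * RESUME_FLAG_* bits into its low two bits, with the (negative)
	 * errno for the host above them -- hence the arithmetic shift. */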
	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(r14)(r1)
	lwz	r15, HOST_NV_GPR(r15)(r1)
	lwz	r16, HOST_NV_GPR(r16)(r1)
	lwz	r17, HOST_NV_GPR(r17)(r1)
	lwz	r18, HOST_NV_GPR(r18)(r1)
	lwz	r19, HOST_NV_GPR(r19)(r1)
	lwz	r20, HOST_NV_GPR(r20)(r1)
	lwz	r21, HOST_NV_GPR(r21)(r1)
	lwz	r22, HOST_NV_GPR(r22)(r1)
	lwz	r23, HOST_NV_GPR(r23)(r1)
	lwz	r24, HOST_NV_GPR(r24)(r1)
	lwz	r25, HOST_NV_GPR(r25)(r1)
	lwz	r26, HOST_NV_GPR(r26)(r1)
	lwz	r27, HOST_NV_GPR(r27)(r1)
	lwz	r28, HOST_NV_GPR(r28)(r1)
	lwz	r29, HOST_NV_GPR(r29)(r1)
	lwz	r30, HOST_NV_GPR(r30)(r1)
	lwz	r31, HOST_NV_GPR(r31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	lwz	r5, HOST_CR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	mtcr	r5
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	stw	r3, HOST_RUN(r1)
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(r14)(r1)
	stw	r15, HOST_NV_GPR(r15)(r1)
	stw	r16, HOST_NV_GPR(r16)(r1)
	stw	r17, HOST_NV_GPR(r17)(r1)
	stw	r18, HOST_NV_GPR(r18)(r1)
	stw	r19, HOST_NV_GPR(r19)(r1)
	stw	r20, HOST_NV_GPR(r20)(r1)
	stw	r21, HOST_NV_GPR(r21)(r1)
	stw	r22, HOST_NV_GPR(r22)(r1)
	stw	r23, HOST_NV_GPR(r23)(r1)
	stw	r24, HOST_NV_GPR(r24)(r1)
	stw	r25, HOST_NV_GPR(r25)(r1)
	stw	r26, HOST_NV_GPR(r26)(r1)
	stw	r27, HOST_NV_GPR(r27)(r1)
	stw	r28, HOST_NV_GPR(r28)(r1)
	stw	r29, HOST_NV_GPR(r29)(r1)
	stw	r30, HOST_NV_GPR(r30)(r1)
	stw	r31, HOST_NV_GPR(r31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(r14)(r4)
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif

lightweight_exit:
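	/* r2 holds 'current' in the host kernel (see the HOST_R2 note at
	 * the top); the guest will clobber it, so stash it in the host
	 * frame, from which kvmppc_resume_host reloads it before calling
	 * back into C code. */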
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif

#ifdef CONFIG_44x
	iccci	0, 0 /* XXX hack */
#endif

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(r0)(r4)
	lwz	r2, VCPU_GPR(r2)(r4)
	lwz	r9, VCPU_GPR(r9)(r4)
	lwz	r10, VCPU_GPR(r10)(r4)
	lwz	r11, VCPU_GPR(r11)(r4)
	lwz	r12, VCPU_GPR(r12)(r4)
	lwz	r13, VCPU_GPR(r13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	/* Save vcpu pointer for the exception handlers. */
	mtspr	SPRN_SPRG_WVCPU, r4

	lwz	r5, VCPU_SHARED(r4)

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(r1)(r4)

	/*
	 * Host interrupt handlers may have clobbered these
	 * guest-readable SPRGs, or the guest kernel may have
	 * written directly to the shared area, so we
	 * need to reload them here with the guest's values.
	 */
	PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
	mtspr	SPRN_SPRG4W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
	mtspr	SPRN_SPRG5W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
	mtspr	SPRN_SPRG6W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(r5)(r4)
	lwz	r6, VCPU_GPR(r6)(r4)
	lwz	r7, VCPU_GPR(r7)(r4)
	lwz	r8, VCPU_GPR(r8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(r3)(r4)
	lwz	r4, VCPU_GPR(r4)(r4)
	rfi

#ifdef CONFIG_SPE
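/* SPE save/restore helpers. r3 is the vcpu pointer (nothing is done if
 * it is NULL); r4 serves as a scratch offset register. The 64-bit SPE
 * accumulator has no direct move-from instruction, so saving it relies
 * on a multiply-accumulate trick: evr6 is zeroed, then evmwumiaa
 * computes 0*0 plus ACC into evr6 (assuming the usual "and accumulate"
 * semantics); on the load side, evmra moves evr6 back into ACC. */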
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
	evxor	evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4, VCPU_ACC
	evstddx	evr6, r4, r3		/* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	li	r4, VCPU_ACC
	evlddx	evr6, r4, r3
	evmra	evr6, evr6		/* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif