/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)				\
	addi	reg, r13, PACA_KVM_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif
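
/*
 * GET_SHADOW_VCPU leaves a real-mode pointer to the shadow vcpu in 'reg'.
 * On 64-bit the shadow vcpu is embedded in the PACA, so it is just an
 * offset from r13; on 32-bit the pointer is read from the thread struct
 * and converted with tophys(), since we run here with translation off.
 */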

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R10 = guest MSR
	 * all other volatile GPRS = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)
	mtsrr0	r9
	mtsrr1	r10
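
	/* SRR0/SRR1 now hold the guest PC and MSR; the RFI at the end of
	 * this function will copy them into PC and MSR, dropping us into
	 * the guest. */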

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, SVCPU_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

	/* Enter guest */

	PPC_LL	r4, (SVCPU_CTR)(r3)
	PPC_LL	r5, (SVCPU_LR)(r3)
	lwz	r6, (SVCPU_CR)(r3)
	lwz	r7, (SVCPU_XER)(r3)

	mtctr	r4
	mtlr	r5
	mtcr	r6
	mtxer	r7
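
	/* With CTR/LR/CR/XER in place, the guest GPRs can be restored; r3
	 * has to come last because it still holds the shadow vcpu pointer
	 * used as the base for all of these loads. */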

	PPC_LL	r0, (SVCPU_R0)(r3)
	PPC_LL	r1, (SVCPU_R1)(r3)
	PPC_LL	r2, (SVCPU_R2)(r3)
	PPC_LL	r4, (SVCPU_R4)(r3)
	PPC_LL	r5, (SVCPU_R5)(r3)
	PPC_LL	r6, (SVCPU_R6)(r3)
	PPC_LL	r7, (SVCPU_R7)(r3)
	PPC_LL	r8, (SVCPU_R8)(r3)
	PPC_LL	r9, (SVCPU_R9)(r3)
	PPC_LL	r10, (SVCPU_R10)(r3)
	PPC_LL	r11, (SVCPU_R11)(r3)
	PPC_LL	r12, (SVCPU_R12)(r3)
	PPC_LL	r13, (SVCPU_R13)(r3)

	PPC_LL	r3, (SVCPU_R3)(r3)

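	/* RFI reloads PC from SRR0 and MSR from SRR1 (the guest PC and MSR
	 * set up above), so this is the actual entry into the guest. */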
	RFI
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0  = guest R13
	 * R12            = exit handler id
	 * R13            = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.SCRATCH0 = guest R12
	 * SVCPU.SCRATCH1 = guest CR
	 *
	 */

	/* Save registers */

	PPC_STL	r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
	PPC_STL	r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
	PPC_STL	r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
	PPC_STL	r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
	PPC_STL	r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
	PPC_STL	r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
	PPC_STL	r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
	PPC_STL	r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
	PPC_STL	r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
	PPC_STL	r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
	PPC_LL	r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)

	/* Save guest PC and MSR */
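	/* Exit ids with bit 0x2 set mark interrupts that were delivered via
	 * the hypervisor save/restore registers, so read HSRR0/1 for those
	 * and clear the marker bit; all others use the normal SRR0/1. */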
	andi.	r0, r12, 0x2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	andi.	r12, r12, 0x3ffd
	b	2f
1:	mfsrr0	r3
	mfsrr1	r4
2:
	PPC_STL	r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
	PPC_STL	r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)

	/* Get scratch'ed off registers */
	GET_SCRATCH0(r9)
	PPC_LL	r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
	lwz	r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)

	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
	stw	r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	stw	r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
	PPC_STL	r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
	stw	r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)

	/*
	 * To easily get at the instruction we took the #vmexit on, we
	 * exploit the fact that the virtual layout is still the same
	 * here, so we can just load from the guest's PC address.
	 */

	/* We only load the last instruction when it's safe */
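	/* That means data storage, program check and alignment interrupts,
	 * the cases where the faulting instruction may have to be emulated. */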
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst

	b	no_ld_last_inst

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)

	/*    1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR			/* Enable paging for data */
	mtmsr	r11
	sync
	/*    2) fetch the instruction */
	lwz	r0, 0(r3)
	/*    3) disable paging again */
	mtmsr	r9
	sync

#endif
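	/* If the quick path is compiled out, or the lwz above faulted and got
	 * skipped, last_inst stays at KVM_INST_FETCH_FAILED, signalling to
	 * the host side that the instruction still needs to be fetched. */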
	stw	r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*  = guest *
	 *
	 */

	/* RFI into the highmem handler */
	mfmsr	r7
	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME	/* Enable paging */
	mtsrr1	r7
	/* Load highmem handler address */
	PPC_LL	r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
	mtsrr0	r8
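
	/* The RFI below turns address translation back on (MSR_IR/DR are set
	 * in SRR1) and continues at the highmem exit handler whose address we
	 * just put into SRR0. */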

	RFI
kvmppc_handler_trampoline_exit_end: