/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <linux/errno.h>
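
/*
 * These routines are intended to be called from C instruction-emulation
 * code: each do_ entry point takes the register number N in r3 and the
 * address to access in r4, and returns 0 in r3 on success or -EFAULT if
 * the memory access faults.  A plausible set of C prototypes (shown only
 * as an illustration, not taken from a header) would be:
 *
 *	extern int do_lfs(int rn, unsigned long ea);
 *	extern int do_stfd(int rn, unsigned long ea);
 */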

#define STKFRM	(PPC_MIN_STKFRM + 16)
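/*
 * The 16 bytes of scratch above the minimum stack frame, at STKFRM-16(r1),
 * are used below to save and restore fr0/vr0/vsr0 while that register is
 * borrowed as a temporary.
 */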

	.macro	extab	instr,handler
	.section	__ex_table,"a"
	PPC_LONG	\instr,\handler
	.previous
	.endm
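
/*
 * extab records an exception-table fixup: if the instruction at \instr
 * faults, execution resumes at \handler, which lets the do_ routines
 * below return -EFAULT instead of oopsing on a bad address.
 */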

	.macro	inst32	op
reg = 0
	.rept	32
20:	\op	reg,0,r4
	b	3f
	extab	20b,99f
reg = reg + 1
	.endr
	.endm
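
/*
 * inst32 expands \op once for each register number 0..31; each expansion
 * is an (instruction, branch-to-3f) pair with its own fixup entry at the
 * local label 99.  Labels 3 and 99 must be supplied by the code that
 * instantiates the macro; it is not used in the code shown in this file.
 */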

/* Get the contents of frN into fr0; N is in r3. */
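/*
 * The get_ and put_ helpers below all use the same position-independent
 * dispatch: r3 arrives holding N * 8, the bcl sets LR to the address of
 * the first 8-byte (move, blr) table entry, and the code at 1: adds the
 * offset and does a bctr into the entry for register N.  Entry 0 is just
 * a blr, since the value is already in the scratch register.
 */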
_GLOBAL(get_fpr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* fr0 is already in fr0 */
	nop
reg = 1
	.rept	31
	fmr	fr0,reg
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of fr0 into frN; N is in r3. */
_GLOBAL(put_fpr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* fr0 is already in fr0 */
	nop
reg = 1
	.rept	31
	fmr	reg,fr0
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load FP reg N from float at *p. N is in r3, p in r4. */
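/*
 * Each do_ routine below follows the same pattern: set up a stack frame,
 * enable the required facility in the MSR (FP, VEC or VSX), save the
 * scratch register (fr0/vr0/vsr0) if N != 0, do the memory access through
 * that scratch register under an extab fixup so a fault yields -EFAULT,
 * move the data between the scratch register and register N via the
 * helpers above, restore the scratch register and the original MSR, and
 * return the status in r3.
 */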
_GLOBAL(do_lfs)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
1:	li	r9,-EFAULT
2:	lfs	fr0,0(r4)
	li	r9,0
3:	bl	put_fpr
	beq	cr7,4f
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Load FP reg N from double at *p. N is in r3, p in r4. */
_GLOBAL(do_lfd)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
1:	li	r9,-EFAULT
2:	lfd	fr0,0(r4)
	li	r9,0
3:	beq	cr7,4f
	bl	put_fpr
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store FP reg N to float at *p. N is in r3, p in r4. */
_GLOBAL(do_stfs)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
	bl	get_fpr
1:	li	r9,-EFAULT
2:	stfs	fr0,0(r4)
	li	r9,0
3:	beq	cr7,4f
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store FP reg N to double at *p. N is in r3, p in r4. */
_GLOBAL(do_stfd)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	ori	r7,r6,MSR_FP
	cmpwi	cr7,r3,0
	mtmsrd	r7
	isync
	beq	cr7,1f
	stfd	fr0,STKFRM-16(r1)
	bl	get_fpr
1:	li	r9,-EFAULT
2:	stfd	fr0,0(r4)
	li	r9,0
3:	beq	cr7,4f
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into vr0; N is in r3. */
_GLOBAL(get_vr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* vr0 is already in vr0 */
	nop
reg = 1
	.rept	31
	vor	vr0,reg,reg	/* assembler doesn't know vmr? */
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vr0 into vrN; N is in r3. */
_GLOBAL(put_vr)
	mflr	r0
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
	blr			/* vr0 is already in vr0 */
	nop
reg = 1
	.rept	31
	vor	reg,vr0,vr0
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load vector reg N from *p. N is in r3, p in r4. */
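/*
 * lvx/stvx have no displacement form, so r8 holds the STKFRM-16 offset of
 * the scratch slot and the saves/restores use r1+r8 indexed addressing.
 */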
_GLOBAL(do_lvx)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VEC@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	stvx	vr0,r1,r8
1:	li	r9,-EFAULT
2:	lvx	vr0,0,r4
	li	r9,0
3:	beq	cr7,4f
	bl	put_vr
	lvx	vr0,r1,r8
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store vector reg N to *p. N is in r3, p in r4. */
_GLOBAL(do_stvx)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VEC@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	stvx	vr0,r1,r8
	bl	get_vr
1:	li	r9,-EFAULT
2:	stvx	vr0,0,r4
	li	r9,0
3:	beq	cr7,4f
	lvx	vr0,r1,r8
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Get the contents of vsrN into vsr0; N is in r3. */
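/*
 * VSX has 64 registers, hence the 0x1f8 mask (N * 8 for N up to 63) and
 * the 63-entry dispatch tables after entry 0.  The XXLOR, LXVD2X and
 * STXVD2X macros from ppc-opcode.h encode the instructions directly,
 * presumably so the file still builds with assemblers that do not know
 * the VSX mnemonics.
 */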
_GLOBAL(get_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vsr0 is already in vsr0 */
	nop
reg = 1
	.rept	63
	XXLOR(0,reg,reg)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vsr0 into vsrN; N is in r3. */
_GLOBAL(put_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vsr0 is already in vsr0 */
	nop
reg = 1
	.rept	63
	XXLOR(reg,0,0)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(do_lxvd2x)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	STXVD2X(0,r1,r8)
1:	li	r9,-EFAULT
2:	LXVD2X(0,0,r4)
	li	r9,0
3:	beq	cr7,4f
	bl	put_vsr
	LXVD2X(0,r1,r8)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

/* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(do_stxvd2x)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	mtmsrd	r7
	isync
	beq	cr7,1f
	STXVD2X(0,r1,r8)
	bl	get_vsr
1:	li	r9,-EFAULT
2:	STXVD2X(0,0,r4)
	li	r9,0
3:	beq	cr7,4f
	LXVD2X(0,r1,r8)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	mtmsrd	r6
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
	extab	2b,3b

#endif /* CONFIG_VSX */