/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

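	/*
	 * The BO field encodes the branch conditions: bit 0x04 clear means
	 * decrement CTR and test it, with bit 0x02 giving the sense of the
	 * test (branch when CTR == 0 if set); bit 0x10 clear means test CR
	 * bit BI, which must equal bit 0x08 of BO for the branch to be taken.
	 */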
	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}


static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000)		/* update forms */
			regs->gpr[ra] = ea;
	}
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
#endif
	return ea;
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
	return ea;
}
#endif /* __powerpc64 */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
#endif
	return ea;
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
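
/*
 * For example, max_align(0x1006) == 2 and max_align(0x1008) == 8 on a
 * 64-bit kernel, so an unaligned access gets broken into the largest
 * naturally aligned pieces possible.
 */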


static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
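	/*
	 * Read the largest naturally aligned chunks possible at each step
	 * and accumulate them most-significant first, matching the
	 * big-endian layout this file assumes for loads and stores.
	 */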
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
	*dest = x;
	return 0;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

	/* unaligned or little-endian, do this in pieces */
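	/*
	 * Write the value most-significant byte first, chopping it into
	 * the largest naturally aligned chunks that fit at each address.
	 */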
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}

/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
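
/*
 * In the macros above, a fault at label 1 makes the exception table
 * entry redirect execution to label 3 in the .fixup section, which
 * loads -EFAULT into err and branches back to label 2; on success,
 * err keeps the value (normally 0) the caller initialised it with.
 */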

static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

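	/*
	 * CR0 is the most significant nibble of the CR image in regs->ccr:
	 * 0x80000000 = LT, 0x40000000 = GT, 0x20000000 = EQ, and
	 * 0x10000000 = SO, which is copied from XER[SO].
	 */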
	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
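	/*
	 * The addition carried out of the (possibly 32-bit) result iff the
	 * truncated sum is less than the first addend, or equal to it when
	 * there was a carry in.
	 */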
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}

static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
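/*
 * MASK32(mb, me) yields the rotate mask with bits mb through me set,
 * with bits numbered from the most significant end as the architecture
 * does, e.g. MASK32(25, 31) == 0x7f; when me < mb the mask wraps around.
 */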
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
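/*
 * The explicit check for n == 0 in ROTATE() avoids the undefined
 * behaviour of shifting right by the full width of the type.
 */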
|  | 543 |  | 
|  | 544 | /* | 
|  | 545 | * Emulate instructions that cause a transfer of control, | 
|  | 546 | * loads and stores, and a few other instructions. | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 547 | * Returns 1 if the step was emulated, 0 if not, | 
|  | 548 | * or -1 if the instruction is one that should not be stepped, | 
|  | 549 | * such as an rfid, or a mtmsrd that would clear MSR_RI. | 
|  | 550 | */ | 
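/*
 * A rough sketch of how a caller such as a kprobe or xmon single-step
 * handler might use this (illustrative only, not code from this file):
 *
 *	ret = emulate_step(regs, instr);
 *	if (ret > 0)
 *		return;			// emulated, regs->nip already updated
 *	if (ret < 0)
 *		refuse_to_step();	// hypothetical: e.g. rfid, unsafe mtmsrd
 *	// ret == 0: fall back to hardware single-stepping
 */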
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	int err;
	unsigned long old_ra;
	long ival;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
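		/*
		 * The branch displacement is the low 16 bits of the
		 * instruction with the AA and LK bits masked off; AA
		 * (instr & 2) selects an absolute target rather than a
		 * PC-relative one, and LK (instr & 1) sets the link register.
		 */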
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		if ((regs->msr & MSR_SF) == 0)
			regs->nip &= 0xffffffffUL;
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = imm;
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
#endif
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1) {
			regs->link = regs->nip + 4;
			if ((regs->msr & MSR_SF) == 0)
				regs->link &= 0xffffffffUL;
		}
		if ((regs->msr & MSR_SF) == 0)
			imm &= 0xffffffffUL;
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip += 4;
			if ((regs->msr & MSR_SF) == 0) {
				regs->nip &= 0xffffffffUL;
				imm &= 0xffffffffUL;
			}
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			return -1;

		case 150:	/* isync */
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif

	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				break;
			regs->gpr[rd] = regs->msr & MSR_MASK;
			goto instr_done;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				break;
			imm = regs->gpr[rd];
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
				break;
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#endif
		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			}
			break;

		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;


/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1UL << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1UL << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;

		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;

		case 246:	/* dcbtst */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetchw((void *) ea);
			}
			goto instr_done;

		case 278:	/* dcbt */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetch((void *) ea);
			}
			goto instr_done;

		}
		break;
	}

	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

	switch (opcode) {
	case 31:
		u = instr & 0x40;
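		/* bit 0x40 of the instruction distinguishes the update
		   forms (e.g. lwzux vs. lwzx) among the X-form accesses */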
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

		case 21:	/* ldx */
		case 53:	/* ldux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       8, regs);
			goto ldst_done;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			goto ldst_done;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       1, regs);
			goto ldst_done;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_load(rd, do_lvx, ea, regs);
			goto ldst_done;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_store(rd, do_stvx, ea, regs);
			goto ldst_done;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;

		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;

#endif

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;

		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;

#endif
		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
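			/*
			 * Bit 0 of the instruction (the TX field) extends
			 * the target register number to address VSRs 32-63.
			 */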
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 36:	/* stw */
	case 37:	/* stwu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;

	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
		case 0:		/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:		/* ldu */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:		/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:		/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:		/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */

	}
	err = -EINVAL;

 ldst_done:
	if (err) {
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip += 4;
#ifdef __powerpc64__
	if ((regs->msr & MSR_SF) == 0)
		regs->nip &= 0xffffffffUL;
#endif
	return 1;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
	goto instr_done;
}