blob: f73daa6f397007eb0f6e1c728c8ca1768b3c7f0b [file] [log] [blame]
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001/*
2 * Single-step support.
3 *
4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
Gui,Jian0d69a052006-11-01 10:50:15 +080012#include <linux/kprobes.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100013#include <linux/ptrace.h>
Linus Torvalds268bb0c2011-05-20 12:50:29 -070014#include <linux/prefetch.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100015#include <asm/sstep.h>
16#include <asm/processor.h>
Paul Mackerras0016a4c2010-06-15 14:48:58 +100017#include <asm/uaccess.h>
18#include <asm/cputable.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100019
20extern char system_call_common[];
21
Paul Mackerrasc0325242005-10-28 22:48:08 +100022#ifdef CONFIG_PPC64
Paul Mackerras14cf11a2005-09-26 16:04:21 +100023/* Bits in SRR1 that are copied from MSR */
Stephen Rothwellaf308372006-03-23 17:38:10 +110024#define MSR_MASK 0xffffffff87c0ffffUL
Paul Mackerrasc0325242005-10-28 22:48:08 +100025#else
26#define MSR_MASK 0x87c0ffff
27#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +100028
Paul Mackerras0016a4c2010-06-15 14:48:58 +100029/* Bits in XER */
30#define XER_SO 0x80000000U
31#define XER_OV 0x40000000U
32#define XER_CA 0x20000000U
33
Sean MacLennancd64d162010-09-01 07:21:21 +000034#ifdef CONFIG_PPC_FPU
Paul Mackerras0016a4c2010-06-15 14:48:58 +100035/*
36 * Functions in ldstfp.S
37 */
38extern int do_lfs(int rn, unsigned long ea);
39extern int do_lfd(int rn, unsigned long ea);
40extern int do_stfs(int rn, unsigned long ea);
41extern int do_stfd(int rn, unsigned long ea);
42extern int do_lvx(int rn, unsigned long ea);
43extern int do_stvx(int rn, unsigned long ea);
44extern int do_lxvd2x(int rn, unsigned long ea);
45extern int do_stxvd2x(int rn, unsigned long ea);
Sean MacLennancd64d162010-09-01 07:21:21 +000046#endif
Paul Mackerras0016a4c2010-06-15 14:48:58 +100047
Paul Mackerras14cf11a2005-09-26 16:04:21 +100048/*
49 * Determine whether a conditional branch instruction would branch.
50 */
/*
 * Decide whether a conditional branch (bc/bclr/bcctr) would be taken,
 * per the BO/BI field semantics.  Returns 1 if taken, 0 if not.
 * NOTE: when BO[2] is clear this decrements regs->ctr as a side
 * effect, exactly as the real instruction would.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		/* bo bit 1 selects "branch if CTR == 0" vs "CTR != 0" */
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR: must match the sense in bo bit 3 */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
70
Paul Mackerras0016a4c2010-06-15 14:48:58 +100071
/*
 * Check that the range [ea, ea+nb) is a legitimate user address.
 * Kernel-mode accesses are always considered OK here.
 */
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
78
/*
 * Calculate effective address for a D-form instruction:
 * EA = (RA|0) + sign-extended 16-bit displacement.  Update-form
 * opcodes (bit 0x04000000 set) also write the EA back to RA.
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000)		/* update forms */
			regs->gpr[ra] = ea;
	}
#ifdef __powerpc64__
	/* in 32-bit mode the effective address is truncated to 32 bits */
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
#endif
	return ea;
}
100
101#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction (e.g. ld/std).
 * The low two instruction bits select the variant, so they are masked
 * off before sign-extending the displacement; variant 1 is the
 * update form, which writes the EA back to RA.
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}
	/* in 32-bit mode the effective address is truncated to 32 bits */
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
	return ea;
}
121#endif /* __powerpc64 */
122
/*
 * Calculate effective address for an X-form instruction:
 * EA = (RA|0) + (RB).  When do_update is set (update-form opcodes)
 * the EA is also written back to RA.
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}
#ifdef __powerpc64__
	/* in 32-bit mode the effective address is truncated to 32 bits */
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;
#endif
	return ea;
}
146
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * that divides x evenly.  Used to pick the biggest naturally-aligned
 * piece for breaking up an unaligned access.
 */
static inline unsigned long max_align(unsigned long x)
{
	unsigned long v = x | sizeof(unsigned long);

	return v & -v;		/* isolate the lowest set bit */
}
156
157
/* Swap the byte order of the low 16 bits of x. */
static inline unsigned long byterev_2(unsigned long x)
{
	unsigned long lo = (x >> 8) & 0xff;	/* old high byte */
	unsigned long hi = x & 0xff;		/* old low byte */

	return (hi << 8) | lo;
}
162
/* Swap the byte order of the low 32 bits of x. */
static inline unsigned long byterev_4(unsigned long x)
{
	unsigned long b0 = x & 0xff;
	unsigned long b1 = (x >> 8) & 0xff;
	unsigned long b2 = (x >> 16) & 0xff;
	unsigned long b3 = (x >> 24) & 0xff;

	return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
}
168
169#ifdef __powerpc64__
/* Swap the byte order of a 64-bit value (64-bit kernels only). */
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
174#endif
175
/*
 * Read a naturally-aligned value of nb bytes (1, 2, 4, or 8 on 64-bit)
 * at ea into *dest.  Returns 0 on success or the __get_user() fault
 * code.  An unsupported nb leaves *dest = 0 and returns 0.
 */
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
202
/*
 * Read nb bytes at an unaligned address ea into *dest, by issuing the
 * largest naturally-aligned reads possible and assembling the result
 * most-significant-piece first (big-endian byte order).
 * Returns 0 on success or the fault code from read_mem_aligned().
 */
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
		/* biggest aligned piece both the address and count allow */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;		/* append piece at the low end */
		ea += c;
	}
	*dest = x;
	return 0;
}
224
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  Validates the address first,
 * then takes the single-access fast path when ea is nb-aligned.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
238
/*
 * Write a naturally-aligned value of nb bytes (1, 2, 4, or 8 on
 * 64-bit) to ea.  Returns 0 on success or the __put_user() fault
 * code.  An unsupported nb writes nothing and returns 0.
 */
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
262
263static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
264 int nb, struct pt_regs *regs)
265{
266 int err;
267 unsigned long c;
268
269 /* unaligned or little-endian, do this in pieces */
270 for (; nb > 0; nb -= c) {
271 c = max_align(ea);
272 if (c > nb)
273 c = max_align(nb);
274 err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
275 if (err)
276 return err;
277 ++ea;
278 }
279 return 0;
280}
281
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  Validates the address first,
 * then takes the single-access fast path when ea is nb-aligned.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
295
Sean MacLennancd64d162010-09-01 07:21:21 +0000296#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 *
 * func is one of the FP helpers declared above (from ldstfp.S —
 * presumably it loads register rn from the given address; confirm
 * against ldstfp.S).  If ea is at least word-aligned the helper is
 * given ea directly; otherwise the bytes are first gathered into a
 * kernel buffer and the helper loads from there.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);		/* aligned enough: direct access */
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		/* right-justify the nb bytes within the buffer word */
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit: two 4-byte halves */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
327
/*
 * Store FP register rn to ea via func (an ldstfp.S helper).  If ea is
 * at least word-aligned the helper stores directly; otherwise the
 * helper stores into a kernel buffer, whose bytes are then copied out
 * with unaligned writes.
 */
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);		/* aligned enough: direct access */
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		/* right-justify the nb bytes within the buffer word */
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit: two 4-byte halves */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}
Sean MacLennancd64d162010-09-01 07:21:21 +0000358#endif
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000359
360#ifdef CONFIG_ALTIVEC
361/* For Altivec/VMX, no need to worry about alignment */
362static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
363 unsigned long ea, struct pt_regs *regs)
364{
365 if (!address_ok(regs, ea & ~0xfUL, 16))
366 return -EFAULT;
367 return (*func)(rn, ea);
368}
369
370static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
371 unsigned long ea, struct pt_regs *regs)
372{
373 if (!address_ok(regs, ea & ~0xfUL, 16))
374 return -EFAULT;
375 return (*func)(rn, ea);
376}
377#endif /* CONFIG_ALTIVEC */
378
379#ifdef CONFIG_VSX
/*
 * VSX 16-byte load via func (do_lxvd2x from ldstfp.S).  Word-aligned
 * addresses are handed to the helper directly; otherwise the two
 * 8-byte halves are gathered into a kernel buffer first.
 */
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);		/* aligned enough: direct access */
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}
397
/*
 * VSX 16-byte store via func (do_stxvd2x from ldstfp.S).  Word-aligned
 * addresses are handed to the helper directly; otherwise the helper
 * stores into a kernel buffer whose halves are then written out.
 */
static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);		/* aligned enough: direct access */
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
416#endif /* CONFIG_VSX */
417
/*
 * Execute an indexed store instruction "op rS,0,rA" (e.g. stwcx.) on a
 * user address, capturing CR in 'cr' so the caller can see the stcx.
 * success bit.  A fault sets err to -EFAULT via the exception table.
 */
#define __put_user_asmx(x, addr, err, op, cr) \
	__asm__ __volatile__( \
		"1: " op " %2,0,%3\n" \
		" mfcr %1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%4\n" \
		" b 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		PPC_LONG_ALIGN "\n" \
		PPC_LONG "1b,3b\n" \
		".previous" \
		: "=r" (err), "=r" (cr) \
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Execute an indexed load instruction "op rD,0,rA" (e.g. lwarx) on a
 * user address.  A fault sets err to -EFAULT via the exception table.
 */
#define __get_user_asmx(x, addr, err, op) \
	__asm__ __volatile__( \
		"1: "op" %1,0,%2\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		" b 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		PPC_LONG_ALIGN "\n" \
		PPC_LONG "1b,3b\n" \
		".previous" \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Execute a cache-management instruction "op 0,rA" (e.g. dcbz) on a
 * user address.  A fault sets err to -EFAULT via the exception table.
 */
#define __cacheop_user_asmx(addr, err, op) \
	__asm__ __volatile__( \
		"1: "op" 0,%1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		" b 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		PPC_LONG_ALIGN "\n" \
		PPC_LONG "1b,3b\n" \
		".previous" \
		: "=r" (err) \
		: "r" (addr), "i" (-EFAULT), "0" (err))
463
/*
 * Set CR0 from the signed value in gpr[rd], as a Rc=1 (record-form)
 * instruction would: LT/GT/EQ from the comparison against zero, and
 * SO copied from XER[SO].  In 32-bit mode only the low word is judged.
 */
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	/* clear CR0 and move XER[SO] (bit 0x80000000) into CR0[SO] (0x10000000) */
	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF))
		val = (int) val;	/* 32-bit mode: sign of the low word */
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;	/* LT */
	else if (val > 0)
		regs->ccr |= 0x40000000;	/* GT */
	else
		regs->ccr |= 0x20000000;	/* EQ */
}
480
/*
 * gpr[rd] = val1 + val2 + carry_in, setting XER[CA] from the carry
 * out.  In 32-bit mode the carry is judged on the low words only.
 * Subtraction variants pass ~val1 with carry_in = 1.
 */
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_SF)) {
		/* 32-bit mode: truncate before the carry comparison */
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	/* unsigned wrap-around (or wrap to exactly val1 with carry_in) => CA */
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
501
502static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
503 int crfld)
504{
505 unsigned int crval, shift;
506
507 crval = (regs->xer >> 31) & 1; /* get SO bit */
508 if (v1 < v2)
509 crval |= 8;
510 else if (v1 > v2)
511 crval |= 4;
512 else
513 crval |= 2;
514 shift = (7 - crfld) * 4;
515 regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
516}
517
518static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
519 unsigned long v2, int crfld)
520{
521 unsigned int crval, shift;
522
523 crval = (regs->xer >> 31) & 1; /* get SO bit */
524 if (v1 < v2)
525 crval |= 8;
526 else if (v1 > v2)
527 crval |= 4;
528 else
529 crval |= 2;
530 shift = (7 - crfld) * 4;
531 regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
532}
533
/*
 * Elements of 32-bit rotate and mask instructions.
 */
/* MASK32(mb, me): ones from bit mb through bit me (IBM numbering,
 * bit 0 = MSB of the low 32 bits); the + ((me) >= (mb)) term makes
 * wrap-around masks (me < mb) come out right. */
#define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
/* MASK64_L(mb): ones from bit mb to bit 63; MASK64_R(me): bit 0 to me */
#define MASK64_L(mb) (~0UL >> (mb))
#define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
/* DATA32(x): replicate the low 32 bits into both halves of a 64-bit word */
#define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x) (x)
#endif
/* Rotate left by n; the n==0 guard avoids an undefined full-width shift */
#define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
548
549/*
550 * Emulate instructions that cause a transfer of control,
551 * loads and stores, and a few other instructions.
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000552 * Returns 1 if the step was emulated, 0 if not,
553 * or -1 if the instruction is one that should not be stepped,
554 * such as an rfid, or a mtmsrd that would clear MSR_RI.
555 */
Gui,Jian0d69a052006-11-01 10:50:15 +0800556int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000557{
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000558 unsigned int opcode, ra, rb, rd, spr, u;
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000559 unsigned long int imm;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000560 unsigned long int val, val2;
561 unsigned long int ea;
562 unsigned int cr, mb, me, sh;
563 int err;
564 unsigned long old_ra;
565 long ival;
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000566
567 opcode = instr >> 26;
568 switch (opcode) {
569 case 16: /* bc */
570 imm = (signed short)(instr & 0xfffc);
571 if ((instr & 2) == 0)
572 imm += regs->nip;
573 regs->nip += 4;
574 if ((regs->msr & MSR_SF) == 0)
575 regs->nip &= 0xffffffffUL;
576 if (instr & 1)
577 regs->link = regs->nip;
578 if (branch_taken(instr, regs))
579 regs->nip = imm;
580 return 1;
Paul Mackerrasc0325242005-10-28 22:48:08 +1000581#ifdef CONFIG_PPC64
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000582 case 17: /* sc */
583 /*
584 * N.B. this uses knowledge about how the syscall
585 * entry code works. If that is changed, this will
586 * need to be changed also.
587 */
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000588 if (regs->gpr[0] == 0x1ebe &&
589 cpu_has_feature(CPU_FTR_REAL_LE)) {
590 regs->msr ^= MSR_LE;
591 goto instr_done;
592 }
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000593 regs->gpr[9] = regs->gpr[13];
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000594 regs->gpr[10] = MSR_KERNEL;
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000595 regs->gpr[11] = regs->nip + 4;
596 regs->gpr[12] = regs->msr & MSR_MASK;
597 regs->gpr[13] = (unsigned long) get_paca();
598 regs->nip = (unsigned long) &system_call_common;
599 regs->msr = MSR_KERNEL;
600 return 1;
Paul Mackerrasc0325242005-10-28 22:48:08 +1000601#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000602 case 18: /* b */
603 imm = instr & 0x03fffffc;
604 if (imm & 0x02000000)
605 imm -= 0x04000000;
606 if ((instr & 2) == 0)
607 imm += regs->nip;
608 if (instr & 1) {
609 regs->link = regs->nip + 4;
610 if ((regs->msr & MSR_SF) == 0)
611 regs->link &= 0xffffffffUL;
612 }
613 if ((regs->msr & MSR_SF) == 0)
614 imm &= 0xffffffffUL;
615 regs->nip = imm;
616 return 1;
617 case 19:
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000618 switch ((instr >> 1) & 0x3ff) {
619 case 16: /* bclr */
620 case 528: /* bcctr */
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000621 imm = (instr & 0x400)? regs->ctr: regs->link;
622 regs->nip += 4;
623 if ((regs->msr & MSR_SF) == 0) {
624 regs->nip &= 0xffffffffUL;
625 imm &= 0xffffffffUL;
626 }
627 if (instr & 1)
628 regs->link = regs->nip;
629 if (branch_taken(instr, regs))
630 regs->nip = imm;
631 return 1;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000632
633 case 18: /* rfid, scary */
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000634 return -1;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000635
636 case 150: /* isync */
637 isync();
638 goto instr_done;
639
640 case 33: /* crnor */
641 case 129: /* crandc */
642 case 193: /* crxor */
643 case 225: /* crnand */
644 case 257: /* crand */
645 case 289: /* creqv */
646 case 417: /* crorc */
647 case 449: /* cror */
648 ra = (instr >> 16) & 0x1f;
649 rb = (instr >> 11) & 0x1f;
650 rd = (instr >> 21) & 0x1f;
651 ra = (regs->ccr >> (31 - ra)) & 1;
652 rb = (regs->ccr >> (31 - rb)) & 1;
653 val = (instr >> (6 + ra * 2 + rb)) & 1;
654 regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
655 (val << (31 - rd));
656 goto instr_done;
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000657 }
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000658 break;
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000659 case 31:
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000660 switch ((instr >> 1) & 0x3ff) {
661 case 598: /* sync */
662#ifdef __powerpc64__
663 switch ((instr >> 21) & 3) {
664 case 1: /* lwsync */
665 asm volatile("lwsync" : : : "memory");
666 goto instr_done;
667 case 2: /* ptesync */
668 asm volatile("ptesync" : : : "memory");
669 goto instr_done;
670 }
671#endif
672 mb();
673 goto instr_done;
674
675 case 854: /* eieio */
676 eieio();
677 goto instr_done;
678 }
679 break;
680 }
681
682 /* Following cases refer to regs->gpr[], so we need all regs */
683 if (!FULL_REGS(regs))
684 return 0;
685
686 rd = (instr >> 21) & 0x1f;
687 ra = (instr >> 16) & 0x1f;
688 rb = (instr >> 11) & 0x1f;
689
690 switch (opcode) {
691 case 7: /* mulli */
692 regs->gpr[rd] = regs->gpr[ra] * (short) instr;
693 goto instr_done;
694
695 case 8: /* subfic */
696 imm = (short) instr;
697 add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
698 goto instr_done;
699
700 case 10: /* cmpli */
701 imm = (unsigned short) instr;
702 val = regs->gpr[ra];
703#ifdef __powerpc64__
704 if ((rd & 1) == 0)
705 val = (unsigned int) val;
706#endif
707 do_cmp_unsigned(regs, val, imm, rd >> 2);
708 goto instr_done;
709
710 case 11: /* cmpi */
711 imm = (short) instr;
712 val = regs->gpr[ra];
713#ifdef __powerpc64__
714 if ((rd & 1) == 0)
715 val = (int) val;
716#endif
717 do_cmp_signed(regs, val, imm, rd >> 2);
718 goto instr_done;
719
720 case 12: /* addic */
721 imm = (short) instr;
722 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
723 goto instr_done;
724
725 case 13: /* addic. */
726 imm = (short) instr;
727 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
728 set_cr0(regs, rd);
729 goto instr_done;
730
731 case 14: /* addi */
732 imm = (short) instr;
733 if (ra)
734 imm += regs->gpr[ra];
735 regs->gpr[rd] = imm;
736 goto instr_done;
737
738 case 15: /* addis */
739 imm = ((short) instr) << 16;
740 if (ra)
741 imm += regs->gpr[ra];
742 regs->gpr[rd] = imm;
743 goto instr_done;
744
745 case 20: /* rlwimi */
746 mb = (instr >> 6) & 0x1f;
747 me = (instr >> 1) & 0x1f;
748 val = DATA32(regs->gpr[rd]);
749 imm = MASK32(mb, me);
750 regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
751 goto logical_done;
752
753 case 21: /* rlwinm */
754 mb = (instr >> 6) & 0x1f;
755 me = (instr >> 1) & 0x1f;
756 val = DATA32(regs->gpr[rd]);
757 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
758 goto logical_done;
759
760 case 23: /* rlwnm */
761 mb = (instr >> 6) & 0x1f;
762 me = (instr >> 1) & 0x1f;
763 rb = regs->gpr[rb] & 0x1f;
764 val = DATA32(regs->gpr[rd]);
765 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
766 goto logical_done;
767
768 case 24: /* ori */
769 imm = (unsigned short) instr;
770 regs->gpr[ra] = regs->gpr[rd] | imm;
771 goto instr_done;
772
773 case 25: /* oris */
774 imm = (unsigned short) instr;
775 regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
776 goto instr_done;
777
778 case 26: /* xori */
779 imm = (unsigned short) instr;
780 regs->gpr[ra] = regs->gpr[rd] ^ imm;
781 goto instr_done;
782
783 case 27: /* xoris */
784 imm = (unsigned short) instr;
785 regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
786 goto instr_done;
787
788 case 28: /* andi. */
789 imm = (unsigned short) instr;
790 regs->gpr[ra] = regs->gpr[rd] & imm;
791 set_cr0(regs, ra);
792 goto instr_done;
793
794 case 29: /* andis. */
795 imm = (unsigned short) instr;
796 regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
797 set_cr0(regs, ra);
798 goto instr_done;
799
800#ifdef __powerpc64__
801 case 30: /* rld* */
802 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
803 val = regs->gpr[rd];
804 if ((instr & 0x10) == 0) {
805 sh = rb | ((instr & 2) << 4);
806 val = ROTATE(val, sh);
807 switch ((instr >> 2) & 3) {
808 case 0: /* rldicl */
809 regs->gpr[ra] = val & MASK64_L(mb);
810 goto logical_done;
811 case 1: /* rldicr */
812 regs->gpr[ra] = val & MASK64_R(mb);
813 goto logical_done;
814 case 2: /* rldic */
815 regs->gpr[ra] = val & MASK64(mb, 63 - sh);
816 goto logical_done;
817 case 3: /* rldimi */
818 imm = MASK64(mb, 63 - sh);
819 regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
820 (val & imm);
821 goto logical_done;
822 }
823 } else {
824 sh = regs->gpr[rb] & 0x3f;
825 val = ROTATE(val, sh);
826 switch ((instr >> 1) & 7) {
827 case 0: /* rldcl */
828 regs->gpr[ra] = val & MASK64_L(mb);
829 goto logical_done;
830 case 1: /* rldcr */
831 regs->gpr[ra] = val & MASK64_R(mb);
832 goto logical_done;
833 }
834 }
835#endif
836
837 case 31:
838 switch ((instr >> 1) & 0x3ff) {
839 case 83: /* mfmsr */
840 if (regs->msr & MSR_PR)
841 break;
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000842 regs->gpr[rd] = regs->msr & MSR_MASK;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000843 goto instr_done;
844 case 146: /* mtmsr */
845 if (regs->msr & MSR_PR)
846 break;
Paul Mackerrasc0325242005-10-28 22:48:08 +1000847 imm = regs->gpr[rd];
848 if ((imm & MSR_RI) == 0)
849 /* can't step mtmsr that would clear MSR_RI */
850 return -1;
851 regs->msr = imm;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000852 goto instr_done;
Paul Mackerrasc0325242005-10-28 22:48:08 +1000853#ifdef CONFIG_PPC64
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000854 case 178: /* mtmsrd */
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000855 /* only MSR_EE and MSR_RI get changed if bit 15 set */
856 /* mtmsrd doesn't change MSR_HV and MSR_ME */
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000857 if (regs->msr & MSR_PR)
858 break;
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000859 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
860 imm = (regs->msr & MSR_MASK & ~imm)
861 | (regs->gpr[rd] & imm);
862 if ((imm & MSR_RI) == 0)
863 /* can't step mtmsrd that would clear MSR_RI */
864 return -1;
865 regs->msr = imm;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000866 goto instr_done;
Paul Mackerrasc0325242005-10-28 22:48:08 +1000867#endif
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000868 case 19: /* mfcr */
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000869 regs->gpr[rd] = regs->ccr;
870 regs->gpr[rd] &= 0xffffffffUL;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000871 goto instr_done;
872
873 case 144: /* mtcrf */
874 imm = 0xf0000000UL;
875 val = regs->gpr[rd];
876 for (sh = 0; sh < 8; ++sh) {
877 if (instr & (0x80000 >> sh))
878 regs->ccr = (regs->ccr & ~imm) |
879 (val & imm);
880 imm >>= 4;
881 }
882 goto instr_done;
883
884 case 339: /* mfspr */
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000885 spr = (instr >> 11) & 0x3ff;
886 switch (spr) {
887 case 0x20: /* mfxer */
888 regs->gpr[rd] = regs->xer;
889 regs->gpr[rd] &= 0xffffffffUL;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000890 goto instr_done;
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000891 case 0x100: /* mflr */
892 regs->gpr[rd] = regs->link;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000893 goto instr_done;
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000894 case 0x120: /* mfctr */
895 regs->gpr[rd] = regs->ctr;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000896 goto instr_done;
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000897 }
898 break;
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000899
900 case 467: /* mtspr */
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000901 spr = (instr >> 11) & 0x3ff;
902 switch (spr) {
903 case 0x20: /* mtxer */
904 regs->xer = (regs->gpr[rd] & 0xffffffffUL);
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000905 goto instr_done;
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000906 case 0x100: /* mtlr */
907 regs->link = regs->gpr[rd];
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000908 goto instr_done;
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000909 case 0x120: /* mtctr */
910 regs->ctr = regs->gpr[rd];
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000911 goto instr_done;
Ananth N Mavinakayanahalli68881992007-04-18 15:56:38 +1000912 }
Paul Mackerras0016a4c2010-06-15 14:48:58 +1000913 break;
914
915/*
916 * Compare instructions
917 */
918 case 0: /* cmp */
919 val = regs->gpr[ra];
920 val2 = regs->gpr[rb];
921#ifdef __powerpc64__
922 if ((rd & 1) == 0) {
923 /* word (32-bit) compare */
924 val = (int) val;
925 val2 = (int) val2;
926 }
927#endif
928 do_cmp_signed(regs, val, val2, rd >> 2);
929 goto instr_done;
930
931 case 32: /* cmpl */
932 val = regs->gpr[ra];
933 val2 = regs->gpr[rb];
934#ifdef __powerpc64__
935 if ((rd & 1) == 0) {
936 /* word (32-bit) compare */
937 val = (unsigned int) val;
938 val2 = (unsigned int) val2;
939 }
940#endif
941 do_cmp_unsigned(regs, val, val2, rd >> 2);
942 goto instr_done;
943
944/*
945 * Arithmetic instructions
946 */
947 case 8: /* subfc */
948 add_with_carry(regs, rd, ~regs->gpr[ra],
949 regs->gpr[rb], 1);
950 goto arith_done;
951#ifdef __powerpc64__
952 case 9: /* mulhdu */
953 asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
954 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
955 goto arith_done;
956#endif
957 case 10: /* addc */
958 add_with_carry(regs, rd, regs->gpr[ra],
959 regs->gpr[rb], 0);
960 goto arith_done;
961
962 case 11: /* mulhwu */
963 asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
964 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
965 goto arith_done;
966
967 case 40: /* subf */
968 regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
969 goto arith_done;
970#ifdef __powerpc64__
971 case 73: /* mulhd */
972 asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
973 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
974 goto arith_done;
975#endif
976 case 75: /* mulhw */
977 asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
978 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
979 goto arith_done;
980
981 case 104: /* neg */
982 regs->gpr[rd] = -regs->gpr[ra];
983 goto arith_done;
984
985 case 136: /* subfe */
986 add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
987 regs->xer & XER_CA);
988 goto arith_done;
989
990 case 138: /* adde */
991 add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
992 regs->xer & XER_CA);
993 goto arith_done;
994
995 case 200: /* subfze */
996 add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
997 regs->xer & XER_CA);
998 goto arith_done;
999
1000 case 202: /* addze */
1001 add_with_carry(regs, rd, regs->gpr[ra], 0L,
1002 regs->xer & XER_CA);
1003 goto arith_done;
1004
1005 case 232: /* subfme */
1006 add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
1007 regs->xer & XER_CA);
1008 goto arith_done;
1009#ifdef __powerpc64__
1010 case 233: /* mulld */
1011 regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
1012 goto arith_done;
1013#endif
1014 case 234: /* addme */
1015 add_with_carry(regs, rd, regs->gpr[ra], -1L,
1016 regs->xer & XER_CA);
1017 goto arith_done;
1018
1019 case 235: /* mullw */
1020 regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
1021 (unsigned int) regs->gpr[rb];
1022 goto arith_done;
1023
1024 case 266: /* add */
1025 regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
1026 goto arith_done;
1027#ifdef __powerpc64__
1028 case 457: /* divdu */
1029 regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
1030 goto arith_done;
1031#endif
1032 case 459: /* divwu */
1033 regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
1034 (unsigned int) regs->gpr[rb];
1035 goto arith_done;
1036#ifdef __powerpc64__
1037 case 489: /* divd */
1038 regs->gpr[rd] = (long int) regs->gpr[ra] /
1039 (long int) regs->gpr[rb];
1040 goto arith_done;
1041#endif
1042 case 491: /* divw */
1043 regs->gpr[rd] = (int) regs->gpr[ra] /
1044 (int) regs->gpr[rb];
1045 goto arith_done;
1046
1047
/*
 * Logical instructions
 *
 * For these, the instruction's rS field is held in 'rd' (source)
 * and the result goes to gpr[ra].
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			/* comment fixed: extended opcode 284 is eqv,
			 * i.e. ~(rS ^ rB); xor is opcode 316 below */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif
/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;	/* 6-bit count; 32..63 yields 0 */
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			/* CA is set iff the source is negative and any 1-bits
			 * were shifted out; the sh >= 32 test short-circuits so
			 * 1 << sh below is only evaluated for sh < 32 */
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;	/* immediate 5-bit count (0..31) */
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			/* NOTE(review): 1 << sh with sh == 31 overflows a
			 * signed int; harmless with kernel codegen but 1ul
			 * would be strictly correct -- confirm */
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#ifdef __powerpc64__
		case 27:	/* sld */
			/*
			 * BUG FIX: the shift count comes from rB, not rS;
			 * the original read regs->gpr[rd] here, so sld was
			 * emulated as a shift by the low bits of the *source*.
			 */
			sh = regs->gpr[rb] & 0x7f;	/* 7-bit count; 64..127 yields 0 */
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			/*
			 * CA is set iff the source is negative and 1-bits were
			 * shifted out.  Use 1ul so the shift is done in 64 bits:
			 * sh can be up to 63 here, and shifting a plain int by
			 * >= 32 is undefined behaviour.
			 */
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);	/* 6-bit count split across the instruction */
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			/* 1ul for the same reason as srad above (sh up to 63) */
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			/* NOTE(review): validates 8 bytes at ea, not a whole
			 * cache line -- presumably enough to prove user access;
			 * confirm against address_ok() semantics */
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;

		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;

		case 246:	/* dcbtst */
			/* only the rd == 0 hint form is emulated (as a prefetch) */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetchw((void *) ea);
			}
			goto instr_done;

		case 278:	/* dcbt */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetch((void *) ea);
			}
			goto instr_done;

		}
		break;
	}

	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

	switch (opcode) {
	case 31:
		u = instr & 0x40;	/* update-form bit of the extended opcode */
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				/* CR0 from the real stwcx., plus XER.SO copied
				 * into CR0.SO */
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

		case 21:	/* ldx */
		case 53:	/* ldux */
			/* u selects the update form; presumably xform_ea
			 * performs the rA update when u is set -- confirm */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       8, regs);
			goto ldst_done;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			goto ldst_done;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       1, regs);
			goto ldst_done;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				break;		/* let the VEC-unavailable path handle it */
			ea = xform_ea(instr, regs, 0);
			err = do_vec_load(rd, do_lvx, ea, regs);
			goto ldst_done;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_store(rd, do_stvx, ea, regs);
			goto ldst_done;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)	/* sign-extend the loaded word */
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)	/* sign-extend the loaded halfword */
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;

		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;

#endif

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;

#ifdef CONFIG_PPC_FPU		/* BUG FIX: was CONFIG_PPC_CPU, a typo that
				 * compiled the FP X-form cases out; the D-form
				 * FP section below uses CONFIG_PPC_FPU */
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;	/* let the FP-unavailable path handle it */
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;

#endif
		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			/* low instruction bit (TX) extends the target to VSR 0..63 */
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 36:	/* stw */
	case 37:	/* stwu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)	/* sign-extend the loaded halfword */
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;

	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		/* load consecutive words into gpr[rd] .. gpr[31] */
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		/* store gpr[rd] .. gpr[31] to consecutive words */
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;	/* let the FP-unavailable path handle it */
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;
#endif

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {	/* DS-form: low 2 bits select the variant */
		case 0:	/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:	/* ldu */
			/* presumably dsform_ea performs the rA update for the
			 * u form -- confirm; the load itself is identical */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:	/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)	/* sign-extend the loaded word */
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:	/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:	/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */

	}
	err = -EINVAL;

 ldst_done:
	if (err) {
		/* undo any rA update an update-form EA calculation made */
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip += 4;
#ifdef __powerpc64__
	if ((regs->msr & MSR_SF) == 0)
		regs->nip &= 0xffffffffUL;	/* stay within 32-bit addressing */
#endif
	return 1;

 logical_done:
	if (instr & 1)		/* Rc bit: record result in CR0 */
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
	goto instr_done;
}