 1/*
2 * linux/arch/parisc/traps.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
6 */
7
8/*
9 * 'Traps.c' handles hardware traps and faults after we have saved some
10 * state in 'asm.s'.
11 */
12
13#include <linux/config.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/errno.h>
18#include <linux/ptrace.h>
19#include <linux/timer.h>
20#include <linux/mm.h>
21#include <linux/module.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/spinlock.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/console.h>
28#include <linux/kallsyms.h>
29
30#include <asm/assembly.h>
31#include <asm/system.h>
32#include <asm/uaccess.h>
33#include <asm/io.h>
34#include <asm/irq.h>
35#include <asm/traps.h>
36#include <asm/unaligned.h>
37#include <asm/atomic.h>
38#include <asm/smp.h>
39#include <asm/pdc.h>
40#include <asm/pdc_chassis.h>
41#include <asm/unwind.h>
42
43#include "../math-emu/math-emu.h" /* for handle_fpe() */
44
45#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
46 /* dumped to the console via printk) */
47
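/* pa_dbit_lock guards the software updates of the PTE dirty bit done by
 * the TLB trap handlers (see entry.S); only SMP, or spinlock debugging,
 * needs an actual lock here. */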
48#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
49DEFINE_SPINLOCK(pa_dbit_lock);
50#endif
51
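/* Render the low 'nbits' bits of 'x' as ASCII '0'/'1' into 'buf', most
 * significant bit first, and NUL-terminate it.  Used below to pretty-print
 * the PSW and FPSR bit by bit; e.g. printbinary(buf, 0x5, 4) leaves "0101"
 * in buf and returns 4. */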
52int printbinary(char *buf, unsigned long x, int nbits)
53{
54 unsigned long mask = 1UL << (nbits - 1);
55 while (mask != 0) {
56 *buf++ = (mask & x ? '1' : '0');
57 mask >>= 1;
58 }
59 *buf = '\0';
60
61 return nbits;
62}
63
64#ifdef __LP64__
65#define RFMT "%016lx"
66#else
67#define RFMT "%08lx"
68#endif
69
70void show_regs(struct pt_regs *regs)
71{
72 int i;
73 char buf[128], *p;
74 char *level;
75 unsigned long cr30;
76 unsigned long cr31;
 77 /* carlos says that gcc copes better with memory kept in a struct,
 78 * and it makes our life easier with fpregs -- T-Bone */
79 struct { u32 sw[2]; } s;
80
 81 level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
82
83 printk("%s\n", level); /* don't want to have that pretty register dump messed up */
84
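	/* In the trap frame the gr[0] slot holds the interruption PSW (r0
	 * itself is always zero), so dump it bit by bit under a legend line
	 * naming each PSW flag. */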
85 printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
86 printbinary(buf, regs->gr[0], 32);
87 printk("%sPSW: %s %s\n", level, buf, print_tainted());
88
89 for (i = 0; i < 32; i += 4) {
90 int j;
91 p = buf;
92 p += sprintf(p, "%sr%02d-%02d ", level, i, i + 3);
93 for (j = 0; j < 4; j++) {
94 p += sprintf(p, " " RFMT, (i+j) == 0 ? 0 : regs->gr[i + j]);
95 }
96 printk("%s\n", buf);
97 }
98
99 for (i = 0; i < 8; i += 4) {
100 int j;
101 p = buf;
102 p += sprintf(p, "%ssr%d-%d ", level, i, i + 3);
103 for (j = 0; j < 4; j++) {
104 p += sprintf(p, " " RFMT, regs->sr[i + j]);
105 }
106 printk("%s\n", buf);
107 }
108
 109 /* FR are 64bit everywhere. Need to use asm to get the content
110 * of fpsr/fper1, and we assume that we won't have a FP Identify
111 * in our way, otherwise we're screwed.
112 * The fldd is used to restore the T-bit if there was one, as the
113 * store clears it anyway.
114 * BTW, PA2.0 book says "thou shall not use fstw on FPSR/FPERs". */
115 __asm__ (
116 "fstd %%fr0,0(%1) \n\t"
117 "fldd 0(%1),%%fr0 \n\t"
118 : "=m" (s) : "r" (&s) : "%r0"
119 );
120
121 printk("%s\n", level);
122 printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
123 printbinary(buf, s.sw[0], 32);
124 printk("%sFPSR: %s\n", level, buf);
125 printk("%sFPER1: %08x\n", level, s.sw[1]);
126
127 /* here we'll print fr0 again, tho it'll be meaningless */
128 for (i = 0; i < 32; i += 4) {
129 int j;
130 p = buf;
131 p += sprintf(p, "%sfr%02d-%02d ", level, i, i + 3);
132 for (j = 0; j < 4; j++)
133 p += sprintf(p, " %016llx", (i+j) == 0 ? 0 : regs->fr[i+j]);
134 printk("%s\n", buf);
135 }
 136
137 cr30 = mfctl(30);
138 cr31 = mfctl(31);
139 printk("%s\n", level);
140 printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
141 level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
142 printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
143 level, regs->iir, regs->isr, regs->ior);
144 printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
145 level, current_thread_info()->cpu, cr30, cr31);
146 printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
147 printk(level);
148 print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
149 printk(level);
150 print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
151 printk(level);
152 print_symbol(" RP(r2): %s\n", regs->gr[2]);
153}
154
155
156void dump_stack(void)
157{
158 show_stack(NULL, NULL);
159}
160
161EXPORT_SYMBOL(dump_stack);
162
163static void do_show_stack(struct unwind_frame_info *info)
164{
165 int i = 1;
166
167 printk("Backtrace:\n");
168 while (i <= 16) {
169 if (unwind_once(info) < 0 || info->ip == 0)
170 break;
171
172 if (__kernel_text_address(info->ip)) {
173 printk(" [<" RFMT ">] ", info->ip);
174#ifdef CONFIG_KALLSYMS
175 print_symbol("%s\n", info->ip);
176#else
177 if ((i & 0x03) == 0)
178 printk("\n");
179#endif
180 i++;
181 }
182 }
183 printk("\n");
184}
185
186void show_stack(struct task_struct *task, unsigned long *s)
187{
188 struct unwind_frame_info info;
189
190 if (!task) {
191 unsigned long sp;
192 struct pt_regs *r;
193
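		/* No task given: unwind the current context.  Build a
		 * throw-away pt_regs whose iaoq points at the HERE label,
		 * whose rp (gr[2]) is our caller and whose gr[30] is the live
		 * stack pointer, so the unwinder has a valid frame to start
		 * from. */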
194HERE:
195 asm volatile ("copy %%r30, %0" : "=r"(sp));
196 r = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
197 if (!r)
198 return;
199 memset(r, 0, sizeof(struct pt_regs));
200 r->iaoq[0] = (unsigned long)&&HERE;
201 r->gr[2] = (unsigned long)__builtin_return_address(0);
202 r->gr[30] = sp;
203 unwind_frame_init(&info, current, r);
204 kfree(r);
205 } else {
206 unwind_frame_init_from_blocked_task(&info, task);
207 }
208
209 do_show_stack(&info);
210}
211
212void die_if_kernel(char *str, struct pt_regs *regs, long err)
213{
214 if (user_mode(regs)) {
215 if (err == 0)
216 return; /* STFU */
217
218 printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
219 current->comm, current->pid, str, err, regs->iaoq[0]);
220#ifdef PRINT_USER_FAULTS
221 /* XXX for debugging only */
222 show_regs(regs);
223#endif
224 return;
225 }
226
227 oops_in_progress = 1;
228
229 /* Amuse the user in a SPARC fashion */
230 printk(
231" _______________________________ \n"
232" < Your System ate a SPARC! Gah! >\n"
233" ------------------------------- \n"
234" \\ ^__^\n"
235" \\ (xx)\\_______\n"
236" (__)\\ )\\/\\\n"
237" U ||----w |\n"
238" || ||\n");
239
240 /* unlock the pdc lock if necessary */
241 pdc_emergency_unlock();
242
243 /* maybe the kernel hasn't booted very far yet and hasn't been able
244 * to initialize the serial or STI console. In that case we should
245 * re-enable the pdc console, so that the user will be able to
246 * identify the problem. */
247 if (!console_drivers)
248 pdc_console_restart();
249
250 printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
251 current->comm, current->pid, str, err);
252 show_regs(regs);
253
254 /* Wot's wrong wif bein' racy? */
255 if (current->thread.flags & PARISC_KERNEL_DEATH) {
256 printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
257 local_irq_enable();
258 while (1);
259 }
260
261 current->thread.flags |= PARISC_KERNEL_DEATH;
262 do_exit(SIGSEGV);
263}
264
265int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
266{
267 return syscall(regs);
268}
269
270/* gdb uses break 4,8 */
271#define GDB_BREAK_INSN 0x10004
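/* "break im5,im13" encodes (roughly) as (im13 << 13) | im5 under a zero
 * major opcode, so gdb's "break 4,8" assembles to 0x10004 -- the value
 * matched against the IIR in handle_break() below. */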
272void handle_gdb_break(struct pt_regs *regs, int wot)
273{
274 struct siginfo si;
275
276 si.si_code = wot;
277 si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
278 si.si_signo = SIGTRAP;
279 si.si_errno = 0;
280 force_sig_info(SIGTRAP, &si, current);
281}
282
283void handle_break(unsigned iir, struct pt_regs *regs)
284{
285 struct siginfo si;
286
287 switch(iir) {
288 case 0x00:
289#ifdef PRINT_USER_FAULTS
290 printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
291 current->pid, current->comm);
292#endif
293 die_if_kernel("Breakpoint", regs, 0);
294#ifdef PRINT_USER_FAULTS
295 show_regs(regs);
296#endif
297 si.si_code = TRAP_BRKPT;
298 si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
299 si.si_signo = SIGTRAP;
300 force_sig_info(SIGTRAP, &si, current);
301 break;
302
303 case GDB_BREAK_INSN:
304 die_if_kernel("Breakpoint", regs, 0);
305 handle_gdb_break(regs, TRAP_BRKPT);
306 break;
307
308 default:
309#ifdef PRINT_USER_FAULTS
310 printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
311 iir, current->pid, current->comm);
312 show_regs(regs);
313#endif
314 si.si_signo = SIGTRAP;
315 si.si_code = TRAP_BRKPT;
316 si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
317 force_sig_info(SIGTRAP, &si, current);
318 return;
319 }
320}
321
322
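/* A TOC (Transfer Of Control) is raised from the system console/front
 * panel, typically to get a hung machine's attention; for now we only
 * log that it happened. */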
323int handle_toc(void)
324{
325 printk(KERN_CRIT "TOC call.\n");
326 return 0;
327}
328
329static void default_trap(int code, struct pt_regs *regs)
330{
331 printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
332 show_regs(regs);
333}
334
335void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;
336
337
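/* After an HPMC the machine state is saved in PIM (Processor Internal
 * Memory) and ends up in hpmc_pim_data (filled in by the HPMC handler in
 * hpmc.S).  Rebuild a pt_regs from it, using the wide or narrow layout
 * depending on the CPU, so the usual show_regs()/backtrace code can be
 * reused.  cr17/cr18 are the IIA space/offset queues, cr19-cr21 are
 * IIR/ISR/IOR, cr22 is the IPSW and cr11 the SAR. */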
338void transfer_pim_to_trap_frame(struct pt_regs *regs)
339{
340 register int i;
341 extern unsigned int hpmc_pim_data[];
342 struct pdc_hpmc_pim_11 *pim_narrow;
343 struct pdc_hpmc_pim_20 *pim_wide;
344
345 if (boot_cpu_data.cpu_type >= pcxu) {
346
347 pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
348
349 /*
350 * Note: The following code will probably generate a
351 * bunch of truncation error warnings from the compiler.
352 * Could be handled with an ifdef, but perhaps there
353 * is a better way.
354 */
355
356 regs->gr[0] = pim_wide->cr[22];
357
358 for (i = 1; i < 32; i++)
359 regs->gr[i] = pim_wide->gr[i];
360
361 for (i = 0; i < 32; i++)
362 regs->fr[i] = pim_wide->fr[i];
363
364 for (i = 0; i < 8; i++)
365 regs->sr[i] = pim_wide->sr[i];
366
367 regs->iasq[0] = pim_wide->cr[17];
368 regs->iasq[1] = pim_wide->iasq_back;
369 regs->iaoq[0] = pim_wide->cr[18];
370 regs->iaoq[1] = pim_wide->iaoq_back;
371
372 regs->sar = pim_wide->cr[11];
373 regs->iir = pim_wide->cr[19];
374 regs->isr = pim_wide->cr[20];
375 regs->ior = pim_wide->cr[21];
376 }
377 else {
378 pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
379
380 regs->gr[0] = pim_narrow->cr[22];
381
382 for (i = 1; i < 32; i++)
383 regs->gr[i] = pim_narrow->gr[i];
384
385 for (i = 0; i < 32; i++)
386 regs->fr[i] = pim_narrow->fr[i];
387
388 for (i = 0; i < 8; i++)
389 regs->sr[i] = pim_narrow->sr[i];
390
391 regs->iasq[0] = pim_narrow->cr[17];
392 regs->iasq[1] = pim_narrow->iasq_back;
393 regs->iaoq[0] = pim_narrow->cr[18];
394 regs->iaoq[1] = pim_narrow->iaoq_back;
395
396 regs->sar = pim_narrow->cr[11];
397 regs->iir = pim_narrow->cr[19];
398 regs->isr = pim_narrow->cr[20];
399 regs->ior = pim_narrow->cr[21];
400 }
401
402 /*
403 * The following fields only have meaning if we came through
404 * another path. So just zero them here.
405 */
406
407 regs->ksp = 0;
408 regs->kpc = 0;
409 regs->orig_r28 = 0;
410}
411
412
413/*
414 * This routine is called as a last resort when everything else
415 * has gone clearly wrong. We get called for faults in kernel space,
 416 * and HPMCs.
417 */
418void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
419{
420 static DEFINE_SPINLOCK(terminate_lock);
421
422 oops_in_progress = 1;
423
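	/* Mask all external interrupts (EIEM is the external interrupt
	 * enable mask) and grab the lock so only one CPU at a time gets to
	 * print the death report. */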
424 set_eiem(0);
425 local_irq_disable();
426 spin_lock(&terminate_lock);
427
428 /* unlock the pdc lock if necessary */
429 pdc_emergency_unlock();
430
431 /* restart pdc console if necessary */
432 if (!console_drivers)
433 pdc_console_restart();
434
435 /* Not all paths will gutter the processor... */
436 switch(code){
437
438 case 1:
439 transfer_pim_to_trap_frame(regs);
440 break;
441
442 default:
443 /* Fall through */
444 break;
445
446 }
447
448 {
449 /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
450 struct unwind_frame_info info;
451 unwind_frame_init(&info, current, regs);
452 do_show_stack(&info);
453 }
454
455 printk("\n");
456 printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
457 msg, code, regs, offset);
458 show_regs(regs);
459
460 spin_unlock(&terminate_lock);
461
462 /* put soft power button back under hardware control;
463 * if the user had pressed it once at any time, the
464 * system will shut down immediately right here. */
465 pdc_soft_power_button(0);
466
467 /* Call kernel panic() so reboot timeouts work properly
468 * FIXME: This function should be on the list of
469 * panic notifiers, and we should call panic
470 * directly from the location that we wish.
471 * e.g. We should not call panic from
 472 * parisc_terminate, but rather the other way around.
473 * This hack works, prints the panic message twice,
474 * and it enables reboot timers!
475 */
476 panic(msg);
477}
478
479void handle_interruption(int code, struct pt_regs *regs)
480{
481 unsigned long fault_address = 0;
482 unsigned long fault_space = 0;
483 struct siginfo si;
484
485 if (code == 1)
486 pdc_console_restart(); /* switch back to pdc if HPMC */
487 else
488 local_irq_enable();
489
490 /* Security check:
491 * If the priority level is still user, and the
492 * faulting space is not equal to the active space
493 * then the user is attempting something in a space
494 * that does not belong to them. Kill the process.
495 *
496 * This is normally the situation when the user
497 * attempts to jump into the kernel space at the
498 * wrong offset, be it at the gateway page or a
499 * random location.
500 *
501 * We cannot normally signal the process because it
502 * could *be* on the gateway page, and processes
503 * executing on the gateway page can't have signals
504 * delivered.
505 *
 506 * We merely readjust the address into the user's
507 * space, at a destination address of zero, and
508 * allow processing to continue.
509 */
510 if (((unsigned long)regs->iaoq[0] & 3) &&
511 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
512 /* Kill the user process later */
513 regs->iaoq[0] = 0 | 3;
514 regs->iaoq[1] = regs->iaoq[0] + 4;
 515 regs->iasq[0] = regs->iasq[1] = regs->sr[7];
516 regs->gr[0] &= ~PSW_B;
517 return;
518 }
519
520#if 0
521 printk(KERN_CRIT "Interruption # %d\n", code);
522#endif
523
524 switch(code) {
525
526 case 1:
527 /* High-priority machine check (HPMC) */
528
529 /* set up a new led state on systems shipped with a LED State panel */
530 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
531
532 parisc_terminate("High Priority Machine Check (HPMC)",
533 regs, code, 0);
534 /* NOT REACHED */
535
536 case 2:
537 /* Power failure interrupt */
538 printk(KERN_CRIT "Power failure interrupt !\n");
539 return;
540
541 case 3:
542 /* Recovery counter trap */
543 regs->gr[0] &= ~PSW_R;
544 if (user_space(regs))
545 handle_gdb_break(regs, TRAP_TRACE);
546 /* else this must be the start of a syscall - just let it run */
547 return;
548
549 case 5:
550 /* Low-priority machine check */
551 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
552
553 flush_all_caches();
554 cpu_lpmc(5, regs);
555 return;
556
557 case 6:
558 /* Instruction TLB miss fault/Instruction page fault */
559 fault_address = regs->iaoq[0];
560 fault_space = regs->iasq[0];
561 break;
562
563 case 8:
564 /* Illegal instruction trap */
565 die_if_kernel("Illegal instruction", regs, code);
566 si.si_code = ILL_ILLOPC;
567 goto give_sigill;
568
569 case 9:
570 /* Break instruction trap */
571 handle_break(regs->iir,regs);
572 return;
573
574 case 10:
575 /* Privileged operation trap */
576 die_if_kernel("Privileged operation", regs, code);
577 si.si_code = ILL_PRVOPC;
578 goto give_sigill;
579
580 case 11:
581 /* Privileged register trap */
582 if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
583
584 /* This is a MFCTL cr26/cr27 to gr instruction.
585 * PCXS traps on this, so we need to emulate it.
586 */
587
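			/* The IIR holds the trapping instruction word.  The
			 * mask above leaves two fields free: bit 0x00200000
			 * selects cr27 vs. cr26, and the low five bits name
			 * the destination GR.  Do the move by hand, then
			 * advance the instruction address queue past the
			 * emulated instruction. */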
588 if (regs->iir & 0x00200000)
589 regs->gr[regs->iir & 0x1f] = mfctl(27);
590 else
591 regs->gr[regs->iir & 0x1f] = mfctl(26);
592
593 regs->iaoq[0] = regs->iaoq[1];
594 regs->iaoq[1] += 4;
595 regs->iasq[0] = regs->iasq[1];
596 return;
597 }
598
599 die_if_kernel("Privileged register usage", regs, code);
600 si.si_code = ILL_PRVREG;
601 give_sigill:
602 si.si_signo = SIGILL;
603 si.si_errno = 0;
604 si.si_addr = (void __user *) regs->iaoq[0];
605 force_sig_info(SIGILL, &si, current);
606 return;
607
608 case 12:
609 /* Overflow Trap, let the userland signal handler do the cleanup */
610 si.si_signo = SIGFPE;
611 si.si_code = FPE_INTOVF;
612 si.si_addr = (void __user *) regs->iaoq[0];
613 force_sig_info(SIGFPE, &si, current);
614 return;
615
616 case 13:
617 /* Conditional Trap
 618 The condition succeeds in an instruction which traps
619 on condition */
620 if(user_mode(regs)){
621 si.si_signo = SIGFPE;
622 /* Set to zero, and let the userspace app figure it out from
623 the insn pointed to by si_addr */
624 si.si_code = 0;
625 si.si_addr = (void __user *) regs->iaoq[0];
626 force_sig_info(SIGFPE, &si, current);
627 return;
628 }
629 /* The kernel doesn't want to handle condition codes */
630 break;
631
632 case 14:
633 /* Assist Exception Trap, i.e. floating point exception. */
634 die_if_kernel("Floating point exception", regs, 0); /* quiet */
635 handle_fpe(regs);
636 return;
637
638 case 15:
639 /* Data TLB miss fault/Data page fault */
640 /* Fall through */
641 case 16:
642 /* Non-access instruction TLB miss fault */
643 /* The instruction TLB entry needed for the target address of the FIC
644 is absent, and hardware can't find it, so we get to cleanup */
645 /* Fall through */
646 case 17:
647 /* Non-access data TLB miss fault/Non-access data page fault */
648 /* FIXME:
649 Still need to add slow path emulation code here!
650 If the insn used a non-shadow register, then the tlb
651 handlers could not have their side-effect (e.g. probe
652 writing to a target register) emulated since rfir would
653 erase the changes to said register. Instead we have to
654 setup everything, call this function we are in, and emulate
655 by hand. Technically we need to emulate:
656 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
657 */
658 fault_address = regs->ior;
659 fault_space = regs->isr;
660 break;
661
662 case 18:
663 /* PCXS only -- later cpu's split this into types 26,27 & 28 */
664 /* Check for unaligned access */
665 if (check_unaligned(regs)) {
666 handle_unaligned(regs);
667 return;
668 }
669 /* Fall Through */
670 case 26:
671 /* PCXL: Data memory access rights trap */
672 fault_address = regs->ior;
673 fault_space = regs->isr;
674 break;
675
676 case 19:
677 /* Data memory break trap */
678 regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
679 /* fall thru */
680 case 21:
681 /* Page reference trap */
682 handle_gdb_break(regs, TRAP_HWBKPT);
683 return;
684
685 case 25:
686 /* Taken branch trap */
687 regs->gr[0] &= ~PSW_T;
688 if (user_space(regs))
689 handle_gdb_break(regs, TRAP_BRANCH);
690 /* else this must be the start of a syscall - just let it
691 * run.
692 */
693 return;
694
695 case 7:
696 /* Instruction access rights */
697 /* PCXL: Instruction memory protection trap */
698
699 /*
700 * This could be caused by either: 1) a process attempting
701 * to execute within a vma that does not have execute
702 * permission, or 2) an access rights violation caused by a
703 * flush only translation set up by ptep_get_and_clear().
704 * So we check the vma permissions to differentiate the two.
705 * If the vma indicates we have execute permission, then
706 * the cause is the latter one. In this case, we need to
707 * call do_page_fault() to fix the problem.
708 */
709
710 if (user_mode(regs)) {
711 struct vm_area_struct *vma;
712
713 down_read(&current->mm->mmap_sem);
714 vma = find_vma(current->mm,regs->iaoq[0]);
715 if (vma && (regs->iaoq[0] >= vma->vm_start)
716 && (vma->vm_flags & VM_EXEC)) {
717
718 fault_address = regs->iaoq[0];
719 fault_space = regs->iasq[0];
720
721 up_read(&current->mm->mmap_sem);
722 break; /* call do_page_fault() */
723 }
724 up_read(&current->mm->mmap_sem);
725 }
726 /* Fall Through */
727 case 27:
728 /* Data memory protection ID trap */
729 die_if_kernel("Protection id trap", regs, code);
730 si.si_code = SEGV_MAPERR;
731 si.si_signo = SIGSEGV;
732 si.si_errno = 0;
733 if (code == 7)
734 si.si_addr = (void __user *) regs->iaoq[0];
735 else
736 si.si_addr = (void __user *) regs->ior;
737 force_sig_info(SIGSEGV, &si, current);
738 return;
739
740 case 28:
741 /* Unaligned data reference trap */
742 handle_unaligned(regs);
743 return;
744
745 default:
746 if (user_mode(regs)) {
747#ifdef PRINT_USER_FAULTS
748 printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
749 current->pid, current->comm);
750 show_regs(regs);
751#endif
752 /* SIGBUS, for lack of a better one. */
753 si.si_signo = SIGBUS;
754 si.si_code = BUS_OBJERR;
755 si.si_errno = 0;
756 si.si_addr = (void __user *) regs->ior;
757 force_sig_info(SIGBUS, &si, current);
758 return;
759 }
760 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
761
762 parisc_terminate("Unexpected interruption", regs, code, 0);
763 /* NOT REACHED */
764 }
765
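	/* Everything that reaches this point is some flavour of TLB miss or
	 * page fault, described by fault_address/fault_space.  A user fault
	 * in a space other than the process' own (sr7) cannot be a valid
	 * mapping, so it is handled right here; a kernel fault in space 0 is
	 * fatal; everything else is passed on to do_page_fault(). */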
766 if (user_mode(regs)) {
767 if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
768#ifdef PRINT_USER_FAULTS
769 if (fault_space == 0)
770 printk(KERN_DEBUG "User Fault on Kernel Space ");
771 else
772 printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
773 code);
774 printk("pid=%d command='%s'\n", current->pid, current->comm);
775 show_regs(regs);
776#endif
777 si.si_signo = SIGSEGV;
778 si.si_errno = 0;
779 si.si_code = SEGV_MAPERR;
780 si.si_addr = (void __user *) regs->ior;
781 force_sig_info(SIGSEGV, &si, current);
782 return;
783 }
784 }
785 else {
786
787 /*
788 * The kernel should never fault on its own address space.
789 */
790
791 if (fault_space == 0)
792 {
793 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
794 parisc_terminate("Kernel Fault", regs, code, fault_address);
795
796 }
797 }
798
799 do_page_fault(regs, code, fault_address);
800}
801
802
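/* Final fixup of the interrupt vector table.  The assembly fault vector
 * leaves the marker string "cows can fly" in its first 32-byte slot so we
 * can tell we were handed the right table; that slot is then cleared.  The
 * second slot is the HPMC entry: firmware wants that entry plus the
 * os_hpmc handler it describes to checksum to zero, so word 7 gets the
 * handler's length and word 5 is set to the negated running sum. */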
803int __init check_ivt(void *iva)
804{
805 int i;
806 u32 check = 0;
807 u32 *ivap;
808 u32 *hpmcp;
809 u32 length;
810 extern void os_hpmc(void);
811 extern void os_hpmc_end(void);
812
813 if (strcmp((char *)iva, "cows can fly"))
814 return -1;
815
816 ivap = (u32 *)iva;
817
818 for (i = 0; i < 8; i++)
819 *ivap++ = 0;
820
821 /* Compute Checksum for HPMC handler */
822
823 length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
824 ivap[7] = length;
825
826 hpmcp = (u32 *)os_hpmc;
827
828 for (i=0; i<length/4; i++)
829 check += *hpmcp++;
830
831 for (i=0; i<8; i++)
832 check += ivap[i];
833
834 ivap[5] = -check;
835
836 return 0;
837}
838
839#ifndef __LP64__
840extern const void fault_vector_11;
841#endif
842extern const void fault_vector_20;
843
844void __init trap_init(void)
845{
846 void *iva;
847
848 if (boot_cpu_data.cpu_type >= pcxu)
849 iva = (void *) &fault_vector_20;
850 else
851#ifdef __LP64__
852 panic("Can't boot 64-bit OS on PA1.1 processor!");
853#else
854 iva = (void *) &fault_vector_11;
855#endif
856
857 if (check_ivt(iva))
858 panic("IVT invalid");
859}