/* Provide functions that dump the trace buffer in a readable form,
 * so people can understand what is going on
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <asm/dma.h>
#include <asm/trace.h>
#include <asm/fixed_code.h>
#include <asm/traps.h>

#ifdef CONFIG_DEBUG_VERBOSE
#define verbose_printk(fmt, arg...) \
	printk(fmt, ##arg)
#else
#define verbose_printk(fmt, arg...) \
	({ if (0) printk(fmt, ##arg); 0; })
#endif


void decode_address(char *buf, unsigned long address)
{
#ifdef CONFIG_DEBUG_VERBOSE
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
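	/* IPEND[4] set means interrupts are globally disabled on this core;
	 * treat that like any other atomic context and avoid anything below
	 * that might sleep.
	 */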
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing; it has been known
	 * to cause problems - corrupt vmas (from kernel crashes) can cause
	 * double faults
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a wee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
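		/* When atomic, peek at p->mm directly rather than taking a
		 * reference on it, since dropping the reference with mmput()
		 * may sleep.
		 */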
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!in_atomic)
				mmput(mm);
			continue;
		}

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							(vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
		if (!in_atomic)
			mmput(mm);
	}

	/*
	 * we were unable to find this address anywhere,
	 * or some MMs were skipped because they were in use.
	 */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
#else
	sprintf(buf, " ");
#endif
}

#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)

/*
 * Similar to get_user(): do some address checking, then dereference.
 * Returns true on success, false on a bad address.
 */
bool get_instruction(unsigned short *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	/* MMR region will never have instructions */
	if (addr >= SYSMMR_BASE)
		return false;

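	/* On-chip instruction memory cannot always be read with plain core
	 * loads, so use whichever access method the region requires.
	 */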
	switch (bfin_mem_access_type(addr, 2)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		*val = *address;
		return true;
	case BFIN_MEM_ACCESS_DMA:
		dma_memcpy(val, address, 2);
		return true;
	case BFIN_MEM_ACCESS_ITEST:
		isram_memcpy(val, address, 2);
		return true;
	default: /* invalid access */
		return false;
	}
}

/*
 * Decode the instruction when we are printing out the trace, as it
 * makes things easier to follow without running it through objdump.
 * These are the normal change-of-flow instructions which would be
 * at the source of the trace buffer
 */
#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
static void decode_instruction(unsigned short *address)
{
	unsigned short opcode;

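	/* match the fixed 16-bit Blackfin opcode encodings for the
	 * change-of-flow instructions listed below
	 */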
	if (get_instruction(&opcode, address)) {
		if (opcode == 0x0010)
			verbose_printk("RTS");
		else if (opcode == 0x0011)
			verbose_printk("RTI");
		else if (opcode == 0x0012)
			verbose_printk("RTX");
		else if (opcode == 0x0013)
			verbose_printk("RTN");
		else if (opcode == 0x0014)
			verbose_printk("RTE");
		else if (opcode == 0x0025)
			verbose_printk("EMUEXCPT");
		else if (opcode >= 0x0040 && opcode <= 0x0047)
			verbose_printk("STI R%i", opcode & 7);
		else if (opcode >= 0x0050 && opcode <= 0x0057)
			verbose_printk("JUMP (P%i)", opcode & 7);
		else if (opcode >= 0x0060 && opcode <= 0x0067)
			verbose_printk("CALL (P%i)", opcode & 7);
		else if (opcode >= 0x0070 && opcode <= 0x0077)
			verbose_printk("CALL (PC+P%i)", opcode & 7);
		else if (opcode >= 0x0080 && opcode <= 0x0087)
			verbose_printk("JUMP (PC+P%i)", opcode & 7);
		else if (opcode >= 0x0090 && opcode <= 0x009F)
			verbose_printk("RAISE 0x%x", opcode & 0xF);
		else if (opcode >= 0x00A0 && opcode <= 0x00AF)
			verbose_printk("EXCPT 0x%x", opcode & 0xF);
		else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
			verbose_printk("IF !CC JUMP");
		else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
			verbose_printk("IF CC JUMP");
		else if (opcode >= 0x2000 && opcode <= 0x2fff)
			verbose_printk("JUMP.S");
		else if (opcode >= 0xe080 && opcode <= 0xe0ff)
			verbose_printk("LSETUP");
		else if (opcode >= 0xe200 && opcode <= 0xe2ff)
			verbose_printk("JUMP.L");
		else if (opcode >= 0xe300 && opcode <= 0xe3ff)
			verbose_printk("CALL pcrel");
		else
			verbose_printk("0x%04x", opcode);
	}

}
#endif

void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_VERBOSE
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0;
	char buf[150];
	unsigned short *addr;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	printk(KERN_NOTICE "Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			decode_address(buf, (unsigned long)bfin_read_TBUF());
			printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			printk(KERN_NOTICE " Source : %s ", buf);
			decode_instruction(addr);
			printk("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
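	/* The expanded (software) trace buffer holds
	 * (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 entries stored as
	 * target/source pairs; start at the most recent entry and walk
	 * backwards, wrapping at EXPAND_LEN.
	 */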
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE " Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		printk("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);

void dump_bfin_process(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		verbose_printk(KERN_EMERG "Kernel OOPS in progress\n");

	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		verbose_printk(KERN_NOTICE "HW Error context\n");
	else if (context & 0x0020)
		verbose_printk(KERN_NOTICE "Deferred Exception context\n");
	else if (context & 0x3FC0)
		verbose_printk(KERN_NOTICE "Interrupt context\n");
	else if (context & 0x4000)
		verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
	else if (context & 0x8000)
		verbose_printk(KERN_NOTICE "Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
				current->comm, current->pid);
		else
			verbose_printk(KERN_NOTICE "COMM= invalid");

		printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
		if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
			verbose_printk(KERN_NOTICE
				"TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
				" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data,
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		else
			verbose_printk(KERN_NOTICE "invalid mm\n");
	} else
		verbose_printk(KERN_NOTICE
			"No Valid process in current context\n");
#endif
}

void dump_bfin_mem(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);

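	/* dump the 64 bytes of code around the 16-byte-aligned return address
	 * (0x10 shorts on either side of it)
	 */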
	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		if (!((unsigned long)addr & 0xF))
			verbose_printk(KERN_NOTICE "0x%p: ", addr);

		if (!get_instruction(&val, addr)) {
			val = 0;
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			verbose_printk("[%s]", buf);
			err = val;
		} else
			verbose_printk(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr && /* in the past */
		    ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
		     val == 0x017b)) /* [SP++] = RETI */
			sti = 1;
	}

	verbose_printk("\n");

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)) {
		verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		verbose_printk(KERN_NOTICE
			"The remaining message may be meaningless\n"
			"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
		/* If we are handling only one peripheral interrupt, and the
		 * current mm and pid are valid, and the last error was in
		 * that user space process's text area, print it out -
		 * because that is where the problem exists
		 */
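		/* (x & (x - 1)) == 0 means at most one bit of x is set, i.e.
		 * at most one interrupt is pending once the global-disable
		 * and hardware-error bits (0x30) are masked out
		 */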
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		    (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
				show_regs(fp + 1);
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
			}
		}
#endif
	}
#endif
}

void show_regs(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	verbose_printk(KERN_NOTICE "\n");
	if (CPUID != bfin_cpuid())
		verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());

	if (bfin_compiled_revid() != bfin_revid())
		verbose_printk("(Detected 0.%d)", bfin_revid());

	verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
	);

	verbose_printk(KERN_NOTICE "%s", linux_banner);

	verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
#ifdef EBIU_ERRMST
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}
	verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	for (i = 2; i <= 15; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf);
			} else
				verbose_printk(KERN_NOTICE " interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
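			/* only take the descriptor lock when it is safe to do
			 * so; in atomic/crash context it may already be held
			 * by the code we interrupted
			 */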
			if (!in_atomic)
				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

			action = irq_desc[i].action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf);
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				verbose_printk(", %s", buf);
			}
			verbose_printk("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		}
	}

	decode_address(buf, fp->rete);
	verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	verbose_printk(KERN_NOTICE " PC : %s\n", buf);

	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
	}

	verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
	verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);

	verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);

	verbose_printk(KERN_NOTICE "\n");
#endif
}