/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

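/*
 * Tunables: kstack_depth_to_print and code_bytes control how much raw
 * stack and how many code bytes around EIP an oops dump includes; both
 * can be overridden on the kernel command line (see the __setup handlers
 * at the bottom of this file).
 */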
int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;
static int die_counter;

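/*
 * Print a single return address of a backtrace. With CONFIG_KALLSYMS the
 * address is resolved to module/symbol+offset/size; entries that were not
 * found via the frame-pointer chain are prefixed with "? " to mark them
 * as unreliable.
 */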
void printk_address(unsigned long address, int reliable)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset = 0;
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[KSYM_NAME_LEN];
	char reliab[4] = "";

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%08lx>]\n", address);
		return;
	}
	if (!reliable)
		strcpy(reliab, "? ");

	if (!modname)
		modname = delim = "";
	printk(" [<%08lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
		address, reliab, delim, modname, delim, symname, offset, symsize);
#else
	printk(" [<%08lx>]\n", address);
#endif
}

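/*
 * A stack pointer is only valid if it lies within the THREAD_SIZE region
 * that holds this task's thread_info, with room for an object of the
 * given size before the end of the stack.
 */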
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size)
{
	void *t = tinfo;
	return p > t && p <= t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

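/*
 * Walk one kernel stack (bounded by its thread_info) and hand every value
 * that looks like a kernel text address to ops->address(). Addresses that
 * sit exactly at a saved-return-address slot of the frame-pointer chain
 * (bp + 4 on 32-bit) are reported as reliable; everything else is a guess.
 */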
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + 4) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}

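/*
 * Core stack walker shared by the oops code and the stacktrace API. It
 * starts on the given (or current) stack and follows previous_esp in each
 * thread_info to continue across nested IRQ/exception stacks, calling back
 * into @ops for every stack switch and address found.
 */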
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			asm("movl %%ebp, %0" : "=r" (bp) :);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	for (;;) {
		struct thread_info *context;

		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		bp = print_context_stack(context, stack, bp, ops, data);
		/*
		 * This call should come after the line below, but somewhere
		 * in early boot the context comes out corrupted and we
		 * can't reference it:
		 */
		if (ops->stack(data, "IRQ") < 0)
			break;
		stack = (unsigned long *)context->previous_esp;
		if (!stack)
			break;
		touch_nmi_watchdog();
	}
}
EXPORT_SYMBOL(dump_trace);

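/*
 * Callbacks that plug the generic walker above into plain printk output;
 * @data carries the log-level prefix string.
 */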
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	printk("%s", (char *)data);
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
	return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	printk("%s [<%08lx>] ", (char *)data, addr);
	if (!reliable)
		printk("? ");
	print_symbol("%s\n", addr);
	touch_nmi_watchdog();
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp, char *log_lvl)
{
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
	printk("%s =======================\n", log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}

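/*
 * Dump the raw stack contents (kstack_depth_to_print words, eight per
 * line) followed by the call trace derived from them.
 */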
static void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 8) == 0))
			printk("\n%s ", log_lvl);
		printk("%08lx ", *stack++);
	}
	printk("\n%sCall Trace:\n", log_lvl);

	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	printk(" ");
	show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp = 0;
	unsigned long stack;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		asm("movl %%ebp, %0" : "=r" (bp) :);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

	show_trace(current, NULL, &stack, bp);
}

EXPORT_SYMBOL(dump_stack);

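/*
 * Full register dump for an oops: module list, registers, and for faults
 * in kernel mode also the raw stack and the code bytes surrounding EIP
 * (the faulting byte is printed in <angle brackets>).
 */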
void show_registers(struct pt_regs *regs)
{
	int i;

	print_modules();
	__show_regs(regs, 0);

	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
		TASK_COMM_LEN, current->comm, task_pid_nr(current),
		current_thread_info(), current, task_thread_info(current));
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault.
	 */
	if (!user_mode_vm(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk("\n" KERN_EMERG "Stack: ");
		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at EIP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(" Bad EIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}

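/*
 * BUG() on x86 is implemented as a ud2 instruction (opcode 0x0f 0x0b);
 * report_bug() uses this helper to check that a trapping EIP really
 * points at one before treating the trap as a BUG.
 */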
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (ip < PAGE_OFFSET)
		return 0;
	if (probe_kernel_address((unsigned short *)ip, ud2))
		return 0;

	return ud2 == 0x0b0f;
}

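/*
 * Oops output from concurrent CPUs is serialized with die_lock;
 * die_owner/die_nest_count let the owning CPU re-enter (e.g. when the
 * oops handling itself faults) without deadlocking on the lock.
 */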
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	unsigned long flags;

	oops_enter();

	if (die_owner != raw_smp_processor_id()) {
		console_verbose();
		raw_local_irq_save(flags);
		__raw_spin_lock(&die_lock);
		die_owner = smp_processor_id();
		die_nest_count = 0;
		bust_spinlocks(1);
	} else {
		raw_local_irq_save(flags);
	}
	die_nest_count++;
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	if (!regs)
		return;

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(signr);
}

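/*
 * Print the body of an oops: header line with die counter and config
 * flags, registers, stack and code, and a one-line EIP/SS:ESP summary in
 * case the rest scrolls off the console. Returns nonzero if a notifier
 * handled the event and the caller should not kill the task.
 */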
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	unsigned short ss;
	unsigned long sp;

	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	sp = (unsigned long) (&regs->sp);
	savesegment(ss, ss);
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
	return 0;
}

/*
 * This is the path taken when something in the kernel has done something
 * bad and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (die_nest_count < 3) {
		report_bug(regs->ip, regs);

		if (__die(str, regs, err))
			regs = NULL;
	} else {
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
	}

	oops_end(flags, regs, SIGSEGV);
}

static DEFINE_SPINLOCK(nmi_print_lock);

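/*
 * Fatal NMI (e.g. from the NMI watchdog): try to get registers out on the
 * console, optionally panic, and otherwise kill the current task.
 */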
void notrace __kprobes
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, so let's at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/*
	 * If we are in the kernel we are probably nested up pretty badly
	 * and might as well get out now while we still can:
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}

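/*
 * Boot-time parameters, e.g. "kstack=48" to print more raw stack words or
 * "code_bytes=128" to widen the Code: dump (capped at 8192).
 */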
static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);

	return 1;
}
__setup("kstack=", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);