/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING     0  /* Backtrace still ongoing */
#define KBT_DONE        1  /* Backtrace cleanly completed */
#define KBT_RUNNING     2  /* Can't run backtrace on a running task */
#define KBT_LOOP        3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
        ulong kstack_base = (ulong) kbt->task->stack;
        if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
                return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
        return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
{
        HV_PTE *l1_pgtable = kbt->pgtable;
        HV_PTE *l2_pgtable;
        unsigned long pfn;
        HV_PTE pte;
        struct page *page;

        if (l1_pgtable == NULL)
                return 0;       /* can't read user space in other tasks */

#ifdef CONFIG_64BIT
        /* Find the real l1_pgtable by looking in the l0_pgtable. */
        pte = l1_pgtable[HV_L0_INDEX(address)];
        if (!hv_pte_get_present(pte))
                return 0;
        pfn = hv_pte_get_pfn(pte);
        if (pte_huge(pte)) {
                if (!pfn_valid(pfn)) {
                        pr_err("L0 huge page has bad pfn %#lx\n", pfn);
                        return 0;
                }
                return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
        }
        page = pfn_to_page(pfn);
        BUG_ON(PageHighMem(page));  /* No HIGHMEM on 64-bit. */
        l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif
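        /*
         * Walk the L1 entry for this address; a huge-page mapping
         * answers the readability question without an L2 lookup.
         */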
        pte = l1_pgtable[HV_L1_INDEX(address)];
        if (!hv_pte_get_present(pte))
                return 0;
        pfn = hv_pte_get_pfn(pte);
        if (pte_huge(pte)) {
                if (!pfn_valid(pfn)) {
                        pr_err("huge page has bad pfn %#lx\n", pfn);
                        return 0;
                }
                return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
        }

        page = pfn_to_page(pfn);
        if (PageHighMem(page)) {
                pr_err("L2 page table not in LOWMEM (%#llx)\n",
                       HV_PFN_TO_CPA(pfn));
                return 0;
        }
        l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
        pte = l2_pgtable[HV_L2_INDEX(address)];
        return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
                             unsigned int size, void *vkbt)
{
        int retval;
        struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

        if (__kernel_text_address(address)) {
                /* OK to read kernel code. */
        } else if (address >= PAGE_OFFSET) {
                /* We only tolerate kernel-space reads of this task's stack */
                if (!in_kernel_stack(kbt, address))
                        return 0;
        } else if (!valid_address(kbt, address)) {
                return 0;       /* invalid user-space address */
        }
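        /*
         * Copy with page faults disabled, so an unmapped address simply
         * fails the copy rather than faulting inside the backtracer.
         */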
        pagefault_disable();
        retval = __copy_from_user_inatomic(result,
                                           (void __user __force *)address,
                                           size);
        pagefault_enable();
        return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
        const char *fault = NULL;  /* happy compiler */
        char fault_buf[64];
        unsigned long sp = kbt->it.sp;
        struct pt_regs *p;

        if (!in_kernel_stack(kbt, sp))
                return NULL;
        if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE - 1))
                return NULL;
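        /* The saved pt_regs sit just above the C ABI save area. */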
        p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
        if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
                fault = "syscall";
        else if (kbt->verbose) {  /* else we aren't going to use it */
                snprintf(fault_buf, sizeof(fault_buf),
                         "interrupt %ld", p->faultnum);
                fault = fault_buf;
        }
        if (EX1_PL(p->ex1) == KERNEL_PL &&
            __kernel_text_address(p->pc) &&
            in_kernel_stack(kbt, p->sp) &&
            p->sp >= sp) {
                if (kbt->verbose)
                        pr_err(" <%s while in kernel mode>\n", fault);
        } else if (EX1_PL(p->ex1) == USER_PL &&
                   p->pc < PAGE_OFFSET &&
                   p->sp < PAGE_OFFSET) {
                if (kbt->verbose)
                        pr_err(" <%s while in user mode>\n", fault);
        } else if (kbt->verbose) {
                pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
                       p->pc, p->sp, p->ex1);
                p = NULL;
        }
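        /*
         * In profile mode, treat an interrupt frame for one of the
         * queued interrupts as terminating the backtrace.
         */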
        if (p == NULL || !kbt->profile ||
            (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
        return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(unsigned long pc)
{
        return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt)
{
        BacktraceIterator *b = &kbt->it;

        if (is_sigreturn(b->pc)) {
                struct rt_sigframe *frame;
                unsigned long sigframe_top =
                        b->sp + sizeof(struct rt_sigframe) - 1;
                if (!valid_address(kbt, b->sp) ||
                    !valid_address(kbt, sigframe_top)) {
                        if (kbt->verbose)
                                pr_err(" (odd signal: sp %#lx?)\n",
                                       (unsigned long)(b->sp));
                        return NULL;
                }
                frame = (struct rt_sigframe *)b->sp;
                if (kbt->verbose) {
                        pr_err(" <received signal %d>\n",
                               frame->info.si_signo);
                }
                return (struct pt_regs *)&frame->uc.uc_mcontext;
        }
        return NULL;
}

static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
        return is_sigreturn(kbt->it.pc);
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
        struct pt_regs *p;

        p = valid_fault_handler(kbt);
        if (p == NULL)
                p = valid_sigframe(kbt);
        if (p == NULL)
                return 0;
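        /* Restart the backtracer from the interrupted context's registers. */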
        backtrace_init(&kbt->it, read_memory_func, kbt,
                       p->pc, p->lr, p->sp, p->regs[52]);
        kbt->new_context = 1;
        return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
        struct KBacktraceIterator *kbt)
{
        for (;;) {
                do {
                        if (!KBacktraceIterator_is_sigreturn(kbt))
                                return KBT_ONGOING;
                } while (backtrace_next(&kbt->it));

                if (!KBacktraceIterator_restart(kbt))
                        return KBT_DONE;
        }
}

/*
 * If the current sp is on a page different from what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help debugging,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        unsigned long ksp0 = get_current_ksp0();
        unsigned long ksp0_base = ksp0 - THREAD_SIZE;
        unsigned long sp = stack_pointer;

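        /*
         * An sp at or above ksp0 has underrun the stack page; an sp
         * down in the thread_info at its base has overrun it.
         */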
        if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
                pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
                       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
        } else if (sp < ksp0_base + sizeof(struct thread_info)) {
                pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
                       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
        }
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
                             struct task_struct *t, struct pt_regs *regs)
{
        unsigned long pc, lr, sp, r52;
        int is_current;

        /*
         * Set up callback information.  We grab the kernel stack base
         * so we will allow reads of that address range, and if we're
         * asking about the current process we grab the page table
         * so we can check user accesses before trying to read them.
         * We flush the TLB to avoid any weird skew issues.
         */
        is_current = (t == NULL);
        kbt->is_current = is_current;
        if (is_current)
                t = validate_current();
        kbt->task = t;
        kbt->pgtable = NULL;
        kbt->verbose = 0;       /* override in caller if desired */
        kbt->profile = 0;       /* override in caller if desired */
        kbt->end = KBT_ONGOING;
        kbt->new_context = 0;
        if (is_current) {
                HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
                if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
                        /*
                         * Not just an optimization: this also allows
                         * this to work at all before va/pa mappings
                         * are set up.
                         */
                        kbt->pgtable = swapper_pg_dir;
                } else {
                        struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
                        if (!PageHighMem(page))
                                kbt->pgtable = __va(pgdir_pa);
                        else
                                pr_err("page table not in LOWMEM (%#llx)\n",
                                       pgdir_pa);
                }
                local_flush_tlb_all();
                validate_stack(regs);
        }

        if (regs == NULL) {
                if (is_current || t->state == TASK_RUNNING) {
                        /* Can't do this; we need registers */
                        kbt->end = KBT_RUNNING;
                        return;
                }
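                /*
                 * The task is asleep, so start from the switch-point
                 * state recorded in its thread_struct.
                 */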
                pc = get_switch_to_pc();
                lr = t->thread.pc;
                sp = t->thread.ksp;
                r52 = 0;
        } else {
                pc = regs->pc;
                lr = regs->lr;
                sp = regs->sp;
                r52 = regs->regs[52];
        }

        backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
        return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
        unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;

        kbt->new_context = 0;
        if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
                kbt->end = KBT_DONE;
                return;
        }
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
        if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
                /* Trapped in a loop; give up. */
                kbt->end = KBT_LOOP;
        }
}
EXPORT_SYMBOL(KBacktraceIterator_next);

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
        int i;

        if (headers) {
                /*
                 * Add a blank line since if we are called from panic(),
                 * then bust_spinlocks() spits out a space in front of us
                 * and it will mess up our KERN_ERR.
                 */
                pr_err("\n");
                pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
                       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
                       smp_processor_id(), get_cycles());
        }
        kbt->verbose = 1;
        i = 0;
        for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
                char *modname;
                const char *name;
                unsigned long address = kbt->it.pc;
                unsigned long offset, size;
                char namebuf[KSYM_NAME_LEN+100];

                if (address >= PAGE_OFFSET)
                        name = kallsyms_lookup(address, &size, &offset,
                                               &modname, namebuf);
                else
                        name = NULL;

                if (!name)
                        namebuf[0] = '\0';
                else {
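                        /* Append "+offset/size" and any module name. */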
                        size_t namelen = strlen(namebuf);
                        size_t remaining = (sizeof(namebuf) - 1) - namelen;
                        char *p = namebuf + namelen;
                        int rc = snprintf(p, remaining, "+%#lx/%#lx ",
                                          offset, size);
                        if (modname && rc < remaining)
                                snprintf(p + rc, remaining - rc,
                                         "[%s] ", modname);
                        namebuf[sizeof(namebuf)-1] = '\0';
                }

                pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
                       i++, address, namebuf, (unsigned long)(kbt->it.sp));

                if (i >= 100) {
                        pr_err("Stack dump truncated (%d frames)\n", i);
                        break;
                }
        }
        if (kbt->end == KBT_LOOP)
                pr_err("Stack dump stopped; next frame identical to this one\n");
        if (headers)
                pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
        struct KBacktraceIterator kbt;

        KBacktraceIterator_init(&kbt, NULL, regs);
        tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
                                       ulong pc, ulong lr, ulong sp, ulong r52)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->pc = pc;
        regs->lr = lr;
        regs->sp = sp;
        regs->regs[52] = r52;
        return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;

        dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
                                      ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;

        KBacktraceIterator_init(kbt, NULL,
                                regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
        struct KBacktraceIterator kbt;

        if (task == NULL || task == current)
                KBacktraceIterator_init_current(&kbt);
        else
                KBacktraceIterator_init(&kbt, task, NULL);
        tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
        struct KBacktraceIterator kbt;
        int skip = trace->skip;
        int i = 0;

        if (task == NULL || task == current)
                KBacktraceIterator_init_current(&kbt);
        else
                KBacktraceIterator_init(&kbt, task, NULL);
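        /* Record kernel PCs, honoring trace->skip, until we reach user space. */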
        for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
                if (skip) {
                        --skip;
                        continue;
                }
                if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
                        break;
                trace->entries[i++] = kbt.it.pc;
        }
        trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
        save_stack_trace_tsk(NULL, trace);
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);