/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0  /* Backtrace still ongoing */
#define KBT_DONE	1  /* Backtrace cleanly completed */
#define KBT_RUNNING	2  /* Can't run backtrace on a running task */
#define KBT_LOOP	3  /* Backtrace entered a loop */
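
/*
 * Typical use of the iterator (a sketch of the pattern; see
 * tile_show_stack() and save_stack_trace_tsk() below for real callers):
 *
 *	struct KBacktraceIterator kbt;
 *
 *	KBacktraceIterator_init(&kbt, task, regs);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		(examine kbt.it.pc and kbt.it.sp for this frame)
 *
 * When the walk stops, kbt.end holds one of the KBT_* values above.
 */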

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (address == 0)
		return 0;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!kbt->is_current) {
		return 0;	/* can't read from other user address spaces */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (sp % sizeof(long) != 0)
		return NULL;
	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {     /* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
	    p->pc < PAGE_OFFSET &&
	    p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else if (kbt->verbose) {
		pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
		       p->pc, p->sp, p->ex1);
		p = NULL;
	}
	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
	return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(unsigned long pc)
{
	return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
				      struct rt_sigframe* kframe)
{
	BacktraceIterator *b = &kbt->it;

	if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
	    b->sp % sizeof(long) == 0) {
		int retval;
		pagefault_disable();
		retval = __copy_from_user_inatomic(
			kframe, (void __user __force *)b->sp,
			sizeof(*kframe));
		pagefault_enable();
		if (retval != 0 ||
		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
			return NULL;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       kframe->info.si_signo);
		}
		return (struct pt_regs *)&kframe->uc.uc_mcontext;
	}
	return NULL;
}

static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;
	struct rt_sigframe kframe;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt, &kframe);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help debugging,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}

	else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}
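
/*
 * Prepare to walk the given task's stack.  If regs is NULL, the walk
 * starts from the frame saved at context switch (t->thread.ksp and
 * t->thread.pc), which is only possible for a task that is not running;
 * otherwise it starts from the supplied registers.  Callers may set
 * kbt->verbose or kbt->profile after this returns (see the comments in
 * the body below).
 */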
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range.
	 */
	is_current = (t == NULL || t == current);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->verbose = 0;   /* override in caller if desired */
	kbt->profile = 0;   /* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 1;
	if (is_current)
		validate_stack(regs);

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);
static void describe_addr(struct KBacktraceIterator *kbt,
			  unsigned long address,
			  int have_mmap_sem, char *buf, size_t bufsize)
{
	struct vm_area_struct *vma;
	size_t namelen, remaining;
	unsigned long size, offset, adjust;
	char *p, *modname;
	const char *name;
	int rc;

	/*
	 * Look one byte back for every caller frame (i.e. those that
	 * aren't a new context) so we look up symbol data for the
	 * call itself, not the following instruction, which may be on
	 * a different line (or in a different function).
	 */
	adjust = !kbt->new_context;
	address -= adjust;

	if (address >= PAGE_OFFSET) {
		/* Handle kernel symbols. */
		BUG_ON(bufsize < KSYM_NAME_LEN);
		name = kallsyms_lookup(address, &size, &offset,
				       &modname, buf);
		if (name == NULL) {
			buf[0] = '\0';
			return;
		}
		namelen = strlen(buf);
		remaining = (bufsize - 1) - namelen;
		p = buf + namelen;
		rc = snprintf(p, remaining, "+%#lx/%#lx ",
			      offset + adjust, size);
		if (modname && rc < remaining)
			snprintf(p + rc, remaining - rc, "[%s] ", modname);
		buf[bufsize-1] = '\0';
		return;
	}

	/* If we don't have the mmap_sem, we can't show any more info. */
	buf[0] = '\0';
	if (!have_mmap_sem)
		return;

	/* Find vma info. */
	vma = find_vma(kbt->task->mm, address);
	if (vma == NULL || address < vma->vm_start) {
		snprintf(buf, bufsize, "[unmapped address] ");
		return;
	}

	if (vma->vm_file) {
		char *s;
		p = d_path(&vma->vm_file->f_path, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		s = strrchr(p, '/');
		if (s)
			p = s+1;
	} else {
		p = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(p);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, p, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;
	int have_mmap_sem = 0;

	if (headers) {
		/*
		 * Add a blank line, since if we are called from panic(),
		 * bust_spinlocks() spits out a space in front of us,
		 * which would mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char namebuf[KSYM_NAME_LEN+100];
		unsigned long address = kbt->it.pc;

		/* Try to acquire the mmap_sem as we pass into userspace. */
		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);

		describe_addr(kbt, address, have_mmap_sem,
			      namebuf, sizeof(namebuf));

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;
	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);
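
/*
 * Synthesize a pt_regs holding just the four values the backtracer
 * needs (pc, lr, sp, and r52); everything else is zeroed.  Used by the
 * entry points below that are handed raw register values rather than a
 * full pt_regs.
 */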
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);