/**
 * @file backtrace.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author David Smith
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/stacktrace.h>

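/*
 * dump_trace() calls this when the walker crosses from one stack to
 * another (process, IRQ, exception); returning zero tells it to keep
 * going through every stack it finds.
 */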
static int backtrace_stack(void *data, char *name)
{
	/* Yes, we want all stacks */
	return 0;
}

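/*
 * dump_trace() calls this for each return address it finds; log it to
 * the oprofile event buffer until the requested depth is exhausted.
 */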
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	/* Test before decrementing: a bare (*depth)-- would wrap the
	 * unsigned counter and resume logging once the budget is spent. */
	if (*depth) {
		(*depth)--;
		oprofile_add_trace(addr);
	}
}

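/* Callbacks plugged into dump_trace() for kernel-mode samples: */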
static struct stacktrace_ops backtrace_ops = {
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack,
};

/* from arch/x86/kernel/cpu/perf_event.c: */

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
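/*
 * A regular copy_from_user() may fault and sleep, which NMI context
 * forbids; pin each page with __get_user_pages_fast() and copy through
 * a short-lived atomic mapping instead.  Note that, unlike
 * copy_from_user(), the return value is the number of bytes copied.
 */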
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
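		/* Pin the page backing the current user address; a page
		 * that cannot be grabbed without faulting ends the copy. */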
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

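		/* kmap_atomic() is usable from NMI context, where a
		 * sleeping kmap() would not be */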
		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}

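/*
 * A 32-bit task on a 64-bit kernel lays out its user stack frames with
 * 32-bit pointers, so it needs a walker built around the compat
 * struct stack_frame_ia32 layout.
 */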
#ifdef CONFIG_COMPAT
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
	/* Also check accessibility of one struct stack_frame_ia32 beyond: */
	struct stack_frame_ia32 bufhead[2];
	struct stack_frame_ia32 *fp;
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);

	oprofile_add_trace(bufhead[0].return_address);

	/* Frame pointers should strictly progress back up the stack
	 * (towards higher addresses). */
	if (head >= fp)
		return NULL;

	return fp;
}

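/*
 * Returns non-zero if the sample was handled here, i.e. if the current
 * task is a 32-bit compat task.
 */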
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame_ia32 *head;

	/* Only a 32-bit user process is handled here */
	if (!current || !test_thread_flag(TIF_IA32))
		return 0;

	head = (struct stack_frame_ia32 *) regs->bp;
	while (depth-- && head)
		head = dump_user_backtrace_32(head);

	return 1;
}

#else
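/* Without CONFIG_COMPAT there are no 32-bit tasks to unwind */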
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	return 0;
}
#endif /* CONFIG_COMPAT */

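/*
 * Native user stack walk: read one frame, log its return address and
 * hand back the next frame pointer, or NULL to stop the walk.
 */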
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
	/* Also check accessibility of one struct stack_frame beyond: */
	struct stack_frame bufhead[2];
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	oprofile_add_trace(bufhead[0].return_address);

	/* Frame pointers should strictly progress back up the stack
	 * (towards higher addresses). */
	if (head >= bufhead[0].next_frame)
		return NULL;

	return bufhead[0].next_frame;
}

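/*
 * Entry point: kernel-mode samples go through the generic dump_trace()
 * machinery, user-mode samples are unwound by chasing the saved frame
 * pointers (compat path first, then native).
 */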
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);

	if (!user_mode_vm(regs)) {
		unsigned long stack = kernel_stack_pointer(regs);
		if (depth)
			dump_trace(NULL, regs, (unsigned long *)stack, 0,
				   &backtrace_ops, &depth);
		return;
	}

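	/* User-mode sample: the compat walker claims 32-bit tasks */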
	if (x86_backtrace_32(regs, depth))
		return;

	while (depth-- && head)
		head = dump_user_backtrace(head);
}