/**
 * @file backtrace.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author David Smith
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/stacktrace.h>

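/*
 * Callbacks for dump_trace(), the generic x86 stack walker, used below
 * for kernel-mode backtraces.
 */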
static int backtrace_stack(void *data, char *name)
{
	/* Yes, we want all stacks */
	return 0;
}

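/* Log one address of the trace until the requested depth is exhausted. */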
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	if ((*depth)--)
		oprofile_add_trace(addr);
}

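/* print_context_stack() does the actual walking of the stack for dump_trace(). */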
static struct stacktrace_ops backtrace_ops = {
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack,
};

/* from arch/x86/kernel/cpu/perf_event.c: */

/*
 * Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI
 * context: the regular copy_from_user() may fault and sleep, which is
 * not allowed here, so user pages are pinned with
 * __get_user_pages_fast() and copied through a temporary atomic
 * mapping instead.
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}

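/*
 * A 32-bit task on a CONFIG_COMPAT kernel pushes 32-bit frame pointers,
 * so its user stack has to be unwound with the ia32 frame layout.
 */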
#ifdef CONFIG_COMPAT
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
	/* Also check accessibility of one struct stack_frame_ia32 beyond: */
	struct stack_frame_ia32 bufhead[2];
	struct stack_frame_ia32 *fp;
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);

	oprofile_add_trace(bufhead[0].return_address);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses):
	 */
	if (head >= fp)
		return NULL;

	return fp;
}

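/*
 * Walk the user stack of a 32-bit task.  Returns 0 if the current task
 * is not 32-bit, so the caller falls back to the native unwind.
 */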
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame_ia32 *head;

	/* User process is 32-bit */
	if (!current || !test_thread_flag(TIF_IA32))
		return 0;

	head = (struct stack_frame_ia32 *) regs->bp;
	while (depth-- && head)
		head = dump_user_backtrace_32(head);

	return 1;
}

#else
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	return 0;
}
#endif /* CONFIG_COMPAT */

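/*
 * Copy one user stack frame (and, as a sanity check, the frame beyond
 * it), log its return address and hand back the next frame pointer.
 */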
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
	/* Also check accessibility of one struct stack_frame beyond: */
	struct stack_frame bufhead[2];
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	oprofile_add_trace(bufhead[0].return_address);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses):
	 */
	if (head >= bufhead[0].next_frame)
		return NULL;

	return bufhead[0].next_frame;
}

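/*
 * Entry point, registered as oprofile's backtrace callback (see
 * ops->backtrace in nmi_int.c): kernel-mode samples go through
 * dump_trace(), user-mode samples are unwound via frame pointers.
 */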
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);

	if (!user_mode_vm(regs)) {
		unsigned long stack = kernel_stack_pointer(regs);
		if (depth)
			dump_trace(NULL, regs, (unsigned long *)stack, 0,
				   &backtrace_ops, &depth);
		return;
	}

	if (x86_backtrace_32(regs, depth))
		return;

	while (depth-- && head)
		head = dump_user_backtrace(head);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164}