/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array	*sysprof_trace;
static int __read_mostly	tracer_enabled;

/*
 * 1 msec sample interval by default:
 */
static unsigned long sample_period = 1000000;
static const unsigned int sample_max_depth = 512;

static DEFINE_MUTEX(sample_timer_lock);
/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

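/*
 * A user-mode stack frame as laid out by the x86 frame-pointer
 * convention: the saved frame pointer of the caller, followed by
 * the return address (matching the walk via regs->bp below):
 */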
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

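/*
 * Safely copy one stack frame from user space. Page faults are
 * disabled because we run in atomic (timer interrupt) context; a
 * frame that would fault is simply not read. Returns 1 on success,
 * 0 if the frame could not be copied.
 */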
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

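/*
 * Called from the per-CPU sampling timer: record a trace entry for
 * the interrupted context. Kernel mode gets a single entry; user
 * mode gets a frame-pointer walk of the user stack, up to
 * sample_max_depth frames.
 */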
static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	const void __user *fp;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		/* kernel */
		ftrace(tr, data, current->pid, 1, 0);
		return;
	}

	__trace_special(tr, data, 0, current->pid, regs->ip);

	fp = (void __user *)regs->bp;

	for (i = 0; i < sample_max_depth; i++) {
		frame.next_fp = NULL;
		frame.return_address = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;

		__trace_special(tr, data, 1, frame.return_address,
				(unsigned long)fp);
		fp = frame.next_fp;
	}

	__trace_special(tr, data, 2, current->pid, i);

	/*
	 * Special trace entry if we overflow the max depth:
	 */
	if (i == sample_max_depth)
		__trace_special(tr, data, -1, -1, -1);
}

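/*
 * hrtimer callback: sample the registers of whatever this CPU was
 * running, then re-arm the timer for the next sample period.
 */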
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* Trace the interrupted context on this CPU: */
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

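/*
 * Set up and arm the sampling hrtimer for one CPU. The caller must
 * already be running on that CPU so the timer is started locally.
 */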
static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}

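/*
 * Start one sampling timer per online CPU, temporarily binding the
 * current task to each CPU in turn so that every hrtimer is armed
 * on the CPU it will sample:
 */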
static void start_stack_timers(void)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}

static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

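/*
 * Reset the per-CPU trace buffers and the trace start timestamp:
 */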
static void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static void start_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
}

static void stop_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	tracer_enabled = 0;
	mutex_unlock(&sample_timer_lock);
}

static void stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}

static void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}

static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sysprof,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);

/* Enough room for a decimal "long", a '\n' and the terminating NUL: */
#define MAX_LONG_DIGITS 22

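/*
 * debugfs read: report the current sample period, in microseconds:
 */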
static ssize_t
sysprof_sample_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	int r;

	r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

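/*
 * debugfs write: set a new sample period, given in microseconds.
 * The timers are stopped and restarted under sample_timer_lock so
 * no CPU keeps sampling at the old rate. For example (the exact
 * path depends on where debugfs is mounted):
 *
 *	echo 500 > /sys/kernel/debug/tracing/sysprof_sample_period
 */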
static ssize_t
sysprof_sample_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	unsigned long val;

	if (cnt > MAX_LONG_DIGITS-1)
		cnt = MAX_LONG_DIGITS-1;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);
	/*
	 * Enforce a minimum sample period of 100 usecs:
	 */
	if (val < 100)
		val = 100;

	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	sample_period = val * 1000;
	start_stack_timers();
	mutex_unlock(&sample_timer_lock);

	return cnt;
}

static struct file_operations sysprof_sample_fops = {
	.read		= sysprof_sample_read,
	.write		= sysprof_sample_write,
};

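/*
 * Create the sysprof_sample_period control file in the tracing
 * debugfs directory:
 */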
void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("sysprof_sample_period", 0644,
				    d_tracer, NULL, &sysprof_sample_fops);
	if (entry)
		return;
	pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
}