/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *sysprof_trace;
static int __read_mostly tracer_enabled;

/*
 * Sample period: 1 msec (1000000 ns) for now:
 */
static const unsigned long sample_period = 1000000;
static const unsigned int sample_max_depth = 512;

/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

struct stack_frame {
	const void __user *next_fp;
	unsigned long return_address;
};

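/*
 * Copy one user stack frame with page faults disabled: the profiling
 * hrtimer fires in hard IRQ context, so we must not sleep. If the frame
 * is inaccessible or not resident, __copy_from_user_inatomic() fails
 * and we return 0.
 */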
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

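/*
 * Called from the profiling timer with the interrupted registers:
 * record one sample for this CPU. Kernel-mode hits are handed to
 * ftrace(); user-mode hits get their stack walked frame by frame.
 */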
static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	const void __user *fp;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		/* kernel */
		ftrace(tr, data, current->pid, 1, 0);
		return;
	}

	trace_special(tr, data, 0, current->pid, regs->ip);

	fp = (void __user *)regs->bp;

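	/*
	 * Walk the user stack by following the frame-pointer chain:
	 * each frame stores the next frame pointer and a return
	 * address. Stop when a frame cannot be copied, or when the
	 * chain drops below the stack pointer and is thus no longer
	 * a sane stack frame.
	 */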
	for (i = 0; i < sample_max_depth; i++) {
		frame.next_fp = NULL;
		frame.return_address = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;

		trace_special(tr, data, 1, frame.return_address,
			      (unsigned long)fp);
		fp = frame.next_fp;
	}

	trace_special(tr, data, 2, current->pid, i);

	/*
	 * Special trace entry if we overflow the max depth:
	 */
	if (i == sample_max_depth)
		trace_special(tr, data, -1, -1, -1);
}

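/*
 * The per-CPU profiling hrtimer callback: take one sample from the
 * interrupted context, then re-arm the timer for the next period.
 */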
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* sample the current state of this CPU: */
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}

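/*
 * hrtimers fire on the CPU that armed them, so temporarily migrate
 * ourselves to each online CPU in turn to start its timer locally.
 */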
static void start_stack_timers(void)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}

static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

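/*
 * Reset the per-CPU trace buffers and record the timestamp the new
 * trace starts at:
 */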
static notrace void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static notrace void start_stack_trace(struct trace_array *tr)
{
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
}

static notrace void stop_stack_trace(struct trace_array *tr)
{
	stop_stack_timers();
	tracer_enabled = 0;
}

static notrace void stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}

static notrace void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}

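/*
 * The tracer plugin itself: registering this with the ftrace core
 * makes "sysprof" selectable as the current tracer through the
 * tracing debugfs interface.
 */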
static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sysprof,
#endif
};

static int __init init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);