/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
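/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * the tracing_enabled control file of this tracer generation):
 *
 *	echo sysprof > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *	cat /sys/kernel/debug/tracing/trace
 */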
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *sysprof_trace;
static int __read_mostly tracer_enabled;

/*
 * 1 msec for now (1000000 ns):
 */
static const unsigned long sample_period = 1000000;

/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

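/*
 * A user-stack frame as laid out by compilers that keep frame pointers
 * (the x86 convention: saved frame pointer, then the return address):
 */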
struct stack_frame {
	const void __user *next_fp;
	unsigned long return_address;
};

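/*
 * Copy one stack frame from user memory. We run in hard-IRQ context
 * and must not fault, hence the _inatomic copy; returns 1 on success,
 * 0 on a bad pointer or a page that is not present:
 */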
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		return 0;

	return 1;
}

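/* upper bound on the number of user-stack frames recorded per sample */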
#define SYSPROF_MAX_ADDRESSES 512

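/*
 * Per-sample hook, called from the per-CPU hrtimer in hard-IRQ context:
 * records the interrupted context, walking the user stack along its
 * frame-pointer chain when we interrupted user mode:
 */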
static void timer_notify(struct pt_regs *regs, int cpu)
{
	const void __user *frame_pointer;
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		/* kernel */
		ftrace(tr, data, current->pid, 1, 0);
		return;
	}

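	/* type-0 entry: the sampled task's pid and its user-mode ip */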
	trace_special(tr, data, 0, current->pid, regs->ip);

	frame_pointer = (void __user *)regs->bp;

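	/*
	 * Walk the frame-pointer chain. Stop when a frame cannot be
	 * copied or when the chain drops below the interrupted stack
	 * pointer, which indicates a bogus frame:
	 */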
	for (i = 0; i < SYSPROF_MAX_ADDRESSES; i++) {
		if (!copy_stack_frame(frame_pointer, &frame))
			break;
		if ((unsigned long)frame_pointer < regs->sp)
			break;

		trace_special(tr, data, 1, frame.return_address,
			      (unsigned long)frame_pointer);
		frame_pointer = frame.next_fp;
	}

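	/*
	 * Close the sample with a type-2 entry carrying the number of
	 * frames walked, plus a sentinel if we hit the limit:
	 */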
	trace_special(tr, data, 2, current->pid, i);

	if (i == SYSPROF_MAX_ADDRESSES)
		trace_special(tr, data, -1, -1, -1);
}

static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* trace here */
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

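/*
 * Arm one profiling hrtimer on the current CPU. The callback must run
 * in hard-IRQ context so that get_irq_regs() sees the interrupted
 * register state, hence the IRQSAFE_NO_SOFTIRQ callback mode:
 */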
static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}

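/*
 * hrtimer_start() arms a timer on the CPU it is called on, so briefly
 * migrate ourselves to each online CPU in turn:
 */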
static void start_stack_timers(void)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
		printk(KERN_DEBUG "started timer on cpu%d\n", cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}

static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
	printk(KERN_DEBUG "cancelled timer on cpu%d\n", cpu);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

static notrace void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static notrace void start_stack_trace(struct trace_array *tr)
{
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
}

static notrace void stop_stack_trace(struct trace_array *tr)
{
	stop_stack_timers();
	tracer_enabled = 0;
}

static notrace void stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}

static notrace void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}

static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_stack,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);