/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;

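/*
 * How often to sample each CPU's stack, in nanoseconds
 * (1,000,000 ns == one sample per millisecond):
 */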
static const unsigned long sample_period = 1000000;

/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

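/*
 * Fires in hard irq context once per sample_period on each CPU. The
 * actual stack capture is not implemented yet (see the placeholder
 * below); the timer re-arms itself by forwarding its expiry and
 * returning HRTIMER_RESTART.
 */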
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/*
	 * trace here: placeholder side effect until the real
	 * stack capture is wired up.
	 */
	panic_timeout++;

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

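/*
 * Set up and arm the per CPU profiling hrtimer. Must run on the
 * target CPU itself: hrtimer_start() queues the timer on the CPU
 * it is called from.
 */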
static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}

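/*
 * Arm one timer per online CPU by temporarily pinning the current
 * task to each CPU in turn, then restore the original affinity mask:
 */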
static void start_stack_timers(void)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
		printk(KERN_DEBUG "started timer on cpu%d\n", cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}

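/*
 * hrtimer_cancel() waits for a running callback to finish, so no
 * sample can be in flight on a CPU once this returns:
 */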
static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
	printk(KERN_DEBUG "cancelled timer on cpu%d\n", cpu);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

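/*
 * Reset the per CPU trace buffers and record a fresh start time:
 */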
static notrace void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

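/*
 * Start/stop the profiler: clear the buffers, arm (or cancel) the
 * per CPU timers and flip the enabled flag:
 */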
static notrace void start_stack_trace(struct trace_array *tr)
{
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
}

static notrace void stop_stack_trace(struct trace_array *tr)
{
	stop_stack_timers();
	tracer_enabled = 0;
}

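/*
 * Tracer callbacks, invoked by the ftrace core: init/reset when the
 * tracer is selected/deselected, ctrl_update when tracing is switched
 * on or off (tr->ctrl holds the new state):
 */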
static notrace void stack_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}

static notrace void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}

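/*
 * The tracer itself, selectable as "sysprof" via the tracing debugfs
 * interface:
 */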
static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_stack,
#endif
};

static int __init init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);