/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;

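/*
 * Per-CPU handler for a context switch: data->disabled acts as a
 * recursion guard so a re-entrant event on this CPU is not recorded
 * twice, and the prev/next task pair is written into the trace
 * buffer with interrupts disabled.
 */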
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

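/*
 * Marker probe callback for kernel_sched_schedule: the arguments
 * arrive as a va_list laid out by the format string given at
 * registration time, so the leading scalar fields are consumed and
 * discarded before the rq and task pointers are pulled out.
 */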
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	sched_switch_func(probe_data, __rq, prev, next);
}

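/*
 * Mirror of sched_switch_func for wakeups: same per-CPU disabled
 * guard, but a wakee/current pair is recorded as a wakeup event.
 */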
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
	    struct task_struct *curr)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

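/*
 * Marker probe callback shared by the kernel_sched_wakeup and
 * kernel_sched_wakeup_new markers: unpacks the va_list per the
 * registered format and records both tasks' command lines before
 * handing off to wakeup_func().
 */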
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
{
	struct task_struct *curr;
	struct task_struct *task;
	struct rq *__rq;

	if (likely(!tracer_enabled))
		return;

	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* now get the meat: "rq %p task %p rq->curr %p" */
	__rq = va_arg(*args, typeof(__rq));
	task = va_arg(*args, typeof(task));
	curr = va_arg(*args, typeof(curr));

	tracing_record_cmdline(task);
	tracing_record_cmdline(curr);

	wakeup_func(probe_data, __rq, task, curr);
}

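/*
 * Restart the trace: stamp the new start time and clear the trace
 * buffer of every online CPU.
 */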
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

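/*
 * Hook the scheduler markers. Each probe is registered against the
 * exact format string the scheduler uses, and a failure unwinds
 * whatever probes were already installed.
 */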
static int tracing_sched_register(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
			"prev_pid %d next_pid %d prev_state %ld "
			"## rq %p prev %p next %p",
			sched_switch_callback,
			&ctx_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
	return ret;
}

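/*
 * Detach all three probes, in the reverse order of registration.
 */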
static void tracing_sched_unregister(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
}

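/*
 * Reference-counted start/stop: the first user installs the marker
 * probes and the last one removes them (atomic_dec_and_test() returns
 * true only when the count reaches zero).
 */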
void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}

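/*
 * Tracer-level enable/disable: reset the buffers, flip cmdline
 * recording and the tracer_enabled flag, and take or drop a
 * sched_ref on the marker probes.
 */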
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	atomic_inc(&trace_record_cmdline_enabled);
	tracer_enabled = 1;
	tracing_start_sched_switch();
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch();
	atomic_dec(&trace_record_cmdline_enabled);
	tracer_enabled = 0;
}

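/*
 * Callbacks wired into struct tracer below: init and reset follow the
 * current tr->ctrl state, and ctrl_update runs when the tracing core
 * flips tr->ctrl on behalf of the user.
 */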
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

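/*
 * The tracer itself, registered with the ftrace core as
 * "sched_switch".
 */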
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

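/*
 * Boot-time init: if a sched_ref is already held (another user may
 * have called tracing_start_sched_switch() before this initcall),
 * install the probes now, then register the tracer.
 */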
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);