/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;

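/*
 * Record one context switch in the per-CPU trace buffer. The per-CPU
 * "disabled" counter guards against recursion: the event is written
 * only when this path is the sole active user on this CPU.
 */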
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

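/*
 * Probe for the "kernel_sched_schedule" marker. The arguments arrive
 * as a va_list matching the format string the probe was registered
 * with: skip the leading pid/state fields, then pull out the run
 * queue and the two tasks involved in the switch.
 */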
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	sched_switch_func(probe_data, __rq, prev, next);
}

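/*
 * Record a wakeup event, mirroring sched_switch_func() above but
 * logging the wakee and the task currently running on the run queue.
 */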
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
			struct task_struct *curr)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

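/*
 * Probe for the "kernel_sched_wakeup" and "kernel_sched_wakeup_new"
 * markers; both are registered with the same format string, so one
 * callback handles the two of them.
 */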
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
{
	struct task_struct *curr;
	struct task_struct *task;
	struct rq *__rq;

	if (likely(!tracer_enabled))
		return;

	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* now get the meat: "rq %p task %p rq->curr %p" */
	__rq = va_arg(*args, typeof(__rq));
	task = va_arg(*args, typeof(task));
	curr = va_arg(*args, typeof(curr));

	tracing_record_cmdline(task);
	tracing_record_cmdline(curr);

	wakeup_func(probe_data, __rq, task, curr);
}

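/*
 * Log three arbitrary values into the trace buffer; usable as an
 * ad-hoc debugging hook from anywhere in the kernel.
 */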
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

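/* Reset the trace start time and clear every online CPU's buffer. */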
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

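/*
 * Attach the probes to the scheduler markers. On any failure, unwind
 * the registrations already made before returning the error.
 */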
static int tracing_sched_register(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
		"prev_pid %d next_pid %d prev_state %ld "
		"## rq %p prev %p next %p",
		sched_switch_callback,
		&ctx_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
	return ret;
}

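/* Detach all three marker probes registered above. */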
static void tracing_sched_unregister(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
}

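/*
 * sched_ref counts users of the sched tracing hooks: the marker
 * probes are registered by the first user and unregistered again
 * when the last one goes away.
 */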
void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}

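/*
 * Start/stop the tracer proper: reset the buffers, toggle cmdline
 * recording and tracer_enabled, and take or drop a sched_ref user.
 */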
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	atomic_inc(&trace_record_cmdline_enabled);
	tracer_enabled = 1;
	tracing_start_sched_switch();
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch();
	atomic_dec(&trace_record_cmdline_enabled);
	tracer_enabled = 0;
}

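/*
 * Hooks wired into struct tracer below; tr->ctrl says whether the
 * trace is currently switched on, so honor it here.
 */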
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

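/*
 * If someone took a sched_ref before this initcall ran, make sure
 * the marker probes are registered before registering the tracer.
 */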
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);