blob: c7fa08a5b7f43a4d1388dda9f6694961516f854e [file] [log] [blame]
Steven Rostedt35e8e302008-05-12 21:20:42 +02001/*
2 * trace context switch
3 *
4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5 *
6 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020012#include <linux/ftrace.h>
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040013#include <trace/sched.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020014
15#include "trace.h"
16
/* The trace_array the probes write into; set by sched_switch_trace_init() */
static struct trace_array *ctx_trace;
/* Non-zero while the sched_switch tracer itself is actively recording */
static int __read_mostly tracer_enabled;
/* Reference count of users needing the sched probes (cmdline recording) */
static atomic_t sched_ref;
Ingo Molnare309b412008-05-12 21:20:51 +020021static void
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040022probe_sched_switch(struct rq *__rq, struct task_struct *prev,
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +020023 struct task_struct *next)
Steven Rostedt35e8e302008-05-12 21:20:42 +020024{
Steven Rostedt35e8e302008-05-12 21:20:42 +020025 struct trace_array_cpu *data;
26 unsigned long flags;
27 long disabled;
28 int cpu;
Steven Rostedt38697052008-10-01 13:14:09 -040029 int pc;
Steven Rostedt35e8e302008-05-12 21:20:42 +020030
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040031 if (!atomic_read(&sched_ref))
32 return;
33
Steven Rostedt41bc8142008-05-22 11:49:22 -040034 tracing_record_cmdline(prev);
35 tracing_record_cmdline(next);
36
Steven Rostedt35e8e302008-05-12 21:20:42 +020037 if (!tracer_enabled)
38 return;
39
Steven Rostedt38697052008-10-01 13:14:09 -040040 pc = preempt_count();
Steven Rostedt18cef372008-05-12 21:20:44 +020041 local_irq_save(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020042 cpu = raw_smp_processor_id();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040043 data = ctx_trace->data[cpu];
Steven Rostedt35e8e302008-05-12 21:20:42 +020044 disabled = atomic_inc_return(&data->disabled);
45
Ingo Molnar4d9493c2008-05-12 21:20:54 +020046 if (likely(disabled == 1))
Steven Rostedt38697052008-10-01 13:14:09 -040047 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
Steven Rostedt35e8e302008-05-12 21:20:42 +020048
49 atomic_dec(&data->disabled);
Steven Rostedt18cef372008-05-12 21:20:44 +020050 local_irq_restore(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020051}
52
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +020053static void
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040054probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +020055{
Ingo Molnar57422792008-05-12 21:20:51 +020056 struct trace_array_cpu *data;
57 unsigned long flags;
58 long disabled;
Steven Rostedt38697052008-10-01 13:14:09 -040059 int cpu, pc;
Ingo Molnar57422792008-05-12 21:20:51 +020060
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040061 if (!likely(tracer_enabled))
Ingo Molnar57422792008-05-12 21:20:51 +020062 return;
63
Steven Rostedt38697052008-10-01 13:14:09 -040064 pc = preempt_count();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040065 tracing_record_cmdline(current);
Ingo Molnard9af56f2008-05-12 21:20:53 +020066
Ingo Molnar57422792008-05-12 21:20:51 +020067 local_irq_save(flags);
68 cpu = raw_smp_processor_id();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040069 data = ctx_trace->data[cpu];
Ingo Molnar57422792008-05-12 21:20:51 +020070 disabled = atomic_inc_return(&data->disabled);
71
Ingo Molnar4d9493c2008-05-12 21:20:54 +020072 if (likely(disabled == 1))
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040073 tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
Steven Rostedt38697052008-10-01 13:14:09 -040074 flags, pc);
Ingo Molnar57422792008-05-12 21:20:51 +020075
76 atomic_dec(&data->disabled);
77 local_irq_restore(flags);
78}
79
/* Restamp the trace start time and wipe every online cpu's ring buffer. */
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
89
/*
 * Attach the probe callbacks to the three scheduler tracepoints.
 *
 * Returns 0 on success or the tracepoint registration error.  On a
 * partial failure, probes registered so far are rolled back in reverse
 * order through the goto labels, so the caller never sees a half
 * attached state.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	/* wakeup_new (first wakeup of a freshly forked task) reuses the same probe */
	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}
122
/* Detach all sched probes, in reverse order of registration. */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
129
Ingo Molnarf2252932008-05-22 10:37:48 +0200130static void tracing_start_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200131{
132 long ref;
133
134 ref = atomic_inc_return(&sched_ref);
135 if (ref == 1)
136 tracing_sched_register();
137}
138
Ingo Molnarf2252932008-05-22 10:37:48 +0200139static void tracing_stop_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200140{
141 long ref;
142
143 ref = atomic_dec_and_test(&sched_ref);
144 if (ref)
145 tracing_sched_unregister();
146}
147
/*
 * Public hook for other tracers: start recording pid <-> comm mappings
 * by taking a reference on the sched probes.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
152
/* Public hook: release the cmdline-recording reference taken above. */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
157
/*
 * Begin a sched_switch trace: clear the buffers, start cmdline
 * recording, and only then flip tracer_enabled so the probes begin
 * writing entries into freshly reset buffers.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}
164
/* Stop writing entries first, then drop the cmdline-recording reference. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}
170
/* Tracer ->init callback: remember the trace_array and start if enabled. */
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}
178
/* Tracer ->reset callback: stop tracing if it is currently running. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}
184
185static void sched_switch_trace_ctrl_update(struct trace_array *tr)
186{
187 /* When starting a new trace, reset the buffers */
188 if (tr->ctrl)
189 start_sched_trace(tr);
190 else
191 stop_sched_trace(tr);
192}
193
/* Tracer ops for the "sched_switch" plugin registered with ftrace. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
204
205__init static int init_sched_switch_trace(void)
206{
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200207 int ret = 0;
208
209 if (atomic_read(&sched_ref))
210 ret = tracing_sched_register();
211 if (ret) {
212 pr_info("error registering scheduler trace\n");
213 return ret;
214 }
Steven Rostedt35e8e302008-05-12 21:20:42 +0200215 return register_tracer(&sched_switch_trace);
216}
217device_initcall(init_sched_switch_trace);