/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;

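/*
 * Probe attached to the sched_switch tracepoint: record the command
 * lines of both tasks and, if the tracer is enabled, log a
 * context-switch entry into this CPU's trace buffer. The per-CPU
 * "disabled" counter prevents re-entrant tracing.
 */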
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!atomic_read(&sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

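/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints:
 * record the current task's command line and, if the tracer is
 * enabled, log a wakeup entry for the woken task.
 */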
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!likely(tracer_enabled))
		return;

	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

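/* Record a new start time and reset every online CPU's trace buffer. */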
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

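/*
 * Attach the probes above to the scheduler tracepoints. On failure,
 * any probe registered so far is removed again before the error is
 * returned.
 */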
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

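/*
 * The probes are reference counted: they are registered when the
 * first user starts sched-switch tracing and unregistered when the
 * last one stops.
 */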
static void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}

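/*
 * Command line recording rides on the same probes, so starting and
 * stopping it just takes or drops a reference.
 */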
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

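/*
 * Tracer start/stop helpers: start resets the buffers, begins cmdline
 * recording and sets tracer_enabled; stop clears tracer_enabled and
 * ends cmdline recording.
 */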
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

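/* The sched_switch tracer, registered with the tracing core below. */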
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

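/*
 * Boot-time registration. If sched_ref is already non-zero (a user
 * started cmdline recording before this initcall ran), attach the
 * tracepoint probes now.
 */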
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);