/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;
static DEFINE_MUTEX(tracepoint_mutex);

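/*
 * Tracepoint probe for sched_switch: record the command lines of both
 * tasks and, if the tracer is enabled, log the context switch into the
 * per-CPU trace buffer.
 */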
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (!atomic_read(&sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

	local_irq_restore(flags);
}

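/*
 * Tracepoint probe used for both sched_wakeup and sched_wakeup_new:
 * log the wakeup of @wakee by the current task when the tracer is
 * enabled.
 */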
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (!likely(tracer_enabled))
		return;

	pc = preempt_count();
	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

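/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints, unwinding the earlier registrations if a
 * later one fails.
 */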
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

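/*
 * sched_ref counts the users of the sched tracepoints: the probes are
 * registered on the first start and unregistered on the last stop,
 * with tracepoint_mutex serializing the transitions.
 */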
static void tracing_start_sched_switch(void)
{
	long ref;

	mutex_lock(&tracepoint_mutex);
	tracer_enabled = 1;
	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
	mutex_unlock(&tracepoint_mutex);
}

static void tracing_stop_sched_switch(void)
{
	long ref;

	mutex_lock(&tracepoint_mutex);
	tracer_enabled = 0;
	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
	mutex_unlock(&tracepoint_mutex);
}

void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

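/* Tracer definition; selected at runtime under the name "sched_switch". */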
struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);