blob: b8f56beb1a621d5ff527a93aee383f0e02fd30dd [file] [log] [blame]
Steven Rostedt35e8e302008-05-12 21:20:42 +02001/*
2 * trace context switch
3 *
4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5 *
6 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020012#include <linux/ftrace.h>
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040013#include <trace/sched.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020014
15#include "trace.h"
16
/* trace_array this tracer writes into; set in sched_switch_trace_init() */
static struct trace_array *ctx_trace;
/* gate for emitting entries; cmdline recording still runs when this is 0 */
static int __read_mostly tracer_enabled;
/* user count of the sched tracepoint probes (tracer + cmdline recording) */
static atomic_t sched_ref;
Ingo Molnare309b412008-05-12 21:20:51 +020021static void
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040022probe_sched_switch(struct rq *__rq, struct task_struct *prev,
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +020023 struct task_struct *next)
Steven Rostedt35e8e302008-05-12 21:20:42 +020024{
Steven Rostedt35e8e302008-05-12 21:20:42 +020025 struct trace_array_cpu *data;
26 unsigned long flags;
Steven Rostedt35e8e302008-05-12 21:20:42 +020027 int cpu;
Steven Rostedt38697052008-10-01 13:14:09 -040028 int pc;
Steven Rostedt35e8e302008-05-12 21:20:42 +020029
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040030 if (!atomic_read(&sched_ref))
31 return;
32
Steven Rostedt41bc8142008-05-22 11:49:22 -040033 tracing_record_cmdline(prev);
34 tracing_record_cmdline(next);
35
Steven Rostedt35e8e302008-05-12 21:20:42 +020036 if (!tracer_enabled)
37 return;
38
Steven Rostedt38697052008-10-01 13:14:09 -040039 pc = preempt_count();
Steven Rostedt18cef372008-05-12 21:20:44 +020040 local_irq_save(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020041 cpu = raw_smp_processor_id();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040042 data = ctx_trace->data[cpu];
Steven Rostedt35e8e302008-05-12 21:20:42 +020043
Steven Rostedt3ea2e6d2008-10-04 02:01:00 -040044 if (likely(!atomic_read(&data->disabled)))
Steven Rostedt38697052008-10-01 13:14:09 -040045 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
Steven Rostedt35e8e302008-05-12 21:20:42 +020046
Steven Rostedt18cef372008-05-12 21:20:44 +020047 local_irq_restore(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020048}
49
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +020050static void
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040051probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +020052{
Ingo Molnar57422792008-05-12 21:20:51 +020053 struct trace_array_cpu *data;
54 unsigned long flags;
Steven Rostedt38697052008-10-01 13:14:09 -040055 int cpu, pc;
Ingo Molnar57422792008-05-12 21:20:51 +020056
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040057 if (!likely(tracer_enabled))
Ingo Molnar57422792008-05-12 21:20:51 +020058 return;
59
Steven Rostedt38697052008-10-01 13:14:09 -040060 pc = preempt_count();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040061 tracing_record_cmdline(current);
Ingo Molnard9af56f2008-05-12 21:20:53 +020062
Ingo Molnar57422792008-05-12 21:20:51 +020063 local_irq_save(flags);
64 cpu = raw_smp_processor_id();
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040065 data = ctx_trace->data[cpu];
Ingo Molnar57422792008-05-12 21:20:51 +020066
Steven Rostedt3ea2e6d2008-10-04 02:01:00 -040067 if (likely(!atomic_read(&data->disabled)))
Mathieu Desnoyersb07c3f12008-07-18 12:16:17 -040068 tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
Steven Rostedt38697052008-10-01 13:14:09 -040069 flags, pc);
Ingo Molnar57422792008-05-12 21:20:51 +020070
Ingo Molnar57422792008-05-12 21:20:51 +020071 local_irq_restore(flags);
72}
73
/*
 * Restart the trace window: stamp the start time, then clear the
 * per-cpu ring buffer of every online cpu.
 */
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
83
/*
 * Attach the probe functions to the three scheduler tracepoints.
 * On failure every probe registered so far is unwound via the goto
 * chain, leaving the tracepoints untouched.  Returns 0 on success or
 * the tracepoint registration error.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	/* newly forked tasks wake up through a separate tracepoint */
	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}
116
/* Detach all three probes, in the reverse order of registration. */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
123
Ingo Molnarf2252932008-05-22 10:37:48 +0200124static void tracing_start_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200125{
126 long ref;
127
128 ref = atomic_inc_return(&sched_ref);
129 if (ref == 1)
130 tracing_sched_register();
131}
132
Ingo Molnarf2252932008-05-22 10:37:48 +0200133static void tracing_stop_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200134{
135 long ref;
136
137 ref = atomic_dec_and_test(&sched_ref);
138 if (ref)
139 tracing_sched_unregister();
140}
141
/*
 * Keep pid->comm mappings recorded even with no tracer active: the
 * probes call tracing_record_cmdline() before checking tracer_enabled,
 * so holding a sched_ref is enough.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
146
/* Release the reference taken by tracing_start_cmdline_record(). */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
151
/* Reset the buffers, make sure the probes are registered, then enable. */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	/* enable last, once the buffers are clean and the probes are live */
	tracer_enabled = 1;
}
158
/* Disable first so no entry is written while tearing down. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}
164
/* Tracer init callback: remember the trace_array; start if ctrl is set. */
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}
172
/* Tracer reset callback: stop tracing if it is currently running. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}
178
179static void sched_switch_trace_ctrl_update(struct trace_array *tr)
180{
181 /* When starting a new trace, reset the buffers */
182 if (tr->ctrl)
183 start_sched_trace(tr);
184 else
185 stop_sched_trace(tr);
186}
187
/* Tracer descriptor registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
198
199__init static int init_sched_switch_trace(void)
200{
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200201 int ret = 0;
202
203 if (atomic_read(&sched_ref))
204 ret = tracing_sched_register();
205 if (ret) {
206 pr_info("error registering scheduler trace\n");
207 return ret;
208 }
Steven Rostedt35e8e302008-05-12 21:20:42 +0200209 return register_tracer(&sched_switch_trace);
210}
211device_initcall(init_sched_switch_trace);