blob: e1285d7b5488b5e6bfb157a498338166e72c9340 [file] [log] [blame]
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020012#include <linux/ftrace.h>
Steven Rostedtad8d75f2009-04-14 19:39:12 -040013#include <trace/events/sched.h>
Steven Rostedt35e8e302008-05-12 21:20:42 +020014
15#include "trace.h"
16
/* trace array that the sched probes record into (set by the tracer) */
static struct trace_array *ctx_trace;
/* incremented by tracers that want sched events written to the buffer */
static int __read_mostly tracer_enabled;
/* reference count of users of the tracepoint probes (cmdline + tracers) */
static int sched_ref;
/* serializes updates to sched_ref and tracer_enabled */
static DEFINE_MUTEX(sched_register_mutex);
/* set while the tracer core has stopped this tracer (->stop callback) */
static int sched_stopped;
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020023
/*
 * tracing_sched_switch_trace - write one context-switch event
 * @tr:    trace array to record into
 * @prev:  task being switched out
 * @next:  task being switched in
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Reserves a TRACE_CTX entry in the ring buffer, fills in pid/prio/state
 * of both tasks plus the destination CPU, and commits it unless the
 * event filter discards it.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	/* reservation fails when the buffer is full or recording is off */
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);

	/* commit (and record stacks) only if the filter keeps the event */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		trace_buffer_unlock_commit(tr, event, flags, pc);
}
50
/*
 * probe_sched_switch - sched_switch tracepoint probe
 *
 * Invoked by the scheduler on every context switch.  Always records the
 * cmdlines of both tasks (so later pid -> comm resolution works) while
 * anyone holds a reference; only writes a trace event when a tracer has
 * enabled recording and tracing is not stopped.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* no users of the probes at all: bail out fast */
	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	/* capture pc before irqs are disabled; flags for the event header */
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* per-cpu disabled counter guards against recursion/concurrent stop */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
79
Frederic Weisbecker82e04af2009-07-29 18:00:29 +020080void
81tracing_sched_wakeup_trace(struct trace_array *tr,
82 struct task_struct *wakee,
83 struct task_struct *curr,
84 unsigned long flags, int pc)
85{
86 struct ftrace_event_call *call = &event_wakeup;
87 struct ring_buffer_event *event;
88 struct ctx_switch_entry *entry;
89
90 event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
91 sizeof(*entry), flags, pc);
92 if (!event)
93 return;
94 entry = ring_buffer_event_data(event);
95 entry->prev_pid = curr->pid;
96 entry->prev_prio = curr->prio;
97 entry->prev_state = curr->state;
98 entry->next_pid = wakee->pid;
99 entry->next_prio = wakee->prio;
100 entry->next_state = wakee->state;
101 entry->next_cpu = task_cpu(wakee);
102
103 if (!filter_check_discard(call, entry, tr->buffer, event))
104 ring_buffer_unlock_commit(tr->buffer, event);
105 ftrace_trace_stack(tr, flags, 6, pc);
106 ftrace_trace_userstack(tr, flags, pc);
107}
108
/*
 * probe_sched_wakeup - sched_wakeup/sched_wakeup_new tracepoint probe
 *
 * Invoked when a task is woken.  Records the current task's cmdline
 * while anyone holds a reference; only writes a wakeup trace event when
 * a tracer has enabled recording and tracing is not stopped.
 */
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	/* no users of the probes at all: bail out fast */
	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	/* capture pc before irqs are disabled; flags for the event header */
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* per-cpu disabled counter guards against recursion/concurrent stop */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
135
/*
 * tracing_sched_register - attach the probes to the sched tracepoints
 *
 * Registers probe_sched_wakeup on both sched_wakeup and sched_wakeup_new,
 * and probe_sched_switch on sched_switch.  On failure, already-registered
 * probes are removed via the goto-cleanup chain.
 *
 * Returns 0 on success or the tracepoint registration error.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	/* undo registrations in reverse order of success */
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}
168
/*
 * tracing_sched_unregister - detach the probes, in reverse order of
 * registration.
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
175
Ingo Molnarf2252932008-05-22 10:37:48 +0200176static void tracing_start_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200177{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100178 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500179 if (!(sched_ref++))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200180 tracing_sched_register();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100181 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200182}
183
Ingo Molnarf2252932008-05-22 10:37:48 +0200184static void tracing_stop_sched_switch(void)
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200185{
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100186 mutex_lock(&sched_register_mutex);
Steven Rostedte168e052008-11-07 22:36:02 -0500187 if (!(--sched_ref))
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200188 tracing_sched_unregister();
Frederic Weisbeckerefade6e2008-10-31 13:28:58 +0100189 mutex_unlock(&sched_register_mutex);
Mathieu Desnoyers5b82a1b2008-05-12 21:21:10 +0200190}
191
/*
 * tracing_start_cmdline_record - enable pid -> comm recording
 *
 * Takes a probe reference so cmdlines are recorded on every context
 * switch, without enabling event tracing itself.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
196
/*
 * tracing_stop_cmdline_record - drop the cmdline-recording reference
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
201
/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	/* a tracer must have assigned a trace array first */
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}
220
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	/* more stops than starts indicates a refcounting bug in a tracer */
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}
235
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
248
/*
 * stop_sched_trace - helper for the tracer ->reset path
 * @tr: trace array (unused; recording state is global)
 */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
253
/*
 * sched_switch_trace_init - tracer ->init callback
 * @tr: trace array to record into
 *
 * Assigns the trace array, clears stale data on all CPUs and starts
 * recording context switches.  Always returns 0.
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
261
/*
 * sched_switch_trace_reset - tracer ->reset callback
 * @tr: trace array being torn down
 *
 * Only stops recording if the probes are still referenced.
 */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
267
/* tracer ->start callback: resume recording (clears the stopped flag) */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}
272
/* tracer ->stop callback: pause recording (probes stay registered) */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}
277
/* tracer registration record for the "sched_switch" plugin */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
290
/* register the sched_switch tracer with the tracing core at boot */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
Ingo Molnarc71dd422008-12-19 01:09:51 +0100296