blob: 12658b3f2b2891fc5d853325dc081a311a972a5a [file] [log] [blame]
Steven Rostedt35e8e302008-05-12 21:20:42 +02001/*
2 * trace context switch
3 *
4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5 *
6 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
12#include <linux/marker.h>
13#include <linux/ftrace.h>
14
15#include "trace.h"
16
/* Trace array this tracer records into; set once in sched_switch_trace_init(). */
static struct trace_array *ctx_trace;
/* Non-zero while the sched_switch tracer is actively recording events. */
static int __read_mostly tracer_enabled;
Ingo Molnare309b412008-05-12 21:20:51 +020020static void
Ingo Molnar4e655512008-05-12 21:20:52 +020021ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
Steven Rostedt35e8e302008-05-12 21:20:42 +020022{
23 struct trace_array *tr = ctx_trace;
24 struct trace_array_cpu *data;
25 unsigned long flags;
26 long disabled;
27 int cpu;
28
29 if (!tracer_enabled)
30 return;
31
Steven Rostedt18cef372008-05-12 21:20:44 +020032 local_irq_save(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020033 cpu = raw_smp_processor_id();
34 data = tr->data[cpu];
35 disabled = atomic_inc_return(&data->disabled);
36
Ingo Molnar4e655512008-05-12 21:20:52 +020037 if (likely(disabled == 1)) {
Steven Rostedt35e8e302008-05-12 21:20:42 +020038 tracing_sched_switch_trace(tr, data, prev, next, flags);
Ingo Molnar4e655512008-05-12 21:20:52 +020039 ftrace_all_fair_tasks(__rq, tr, data);
40 }
Steven Rostedt35e8e302008-05-12 21:20:42 +020041
42 atomic_dec(&data->disabled);
Steven Rostedt18cef372008-05-12 21:20:44 +020043 local_irq_restore(flags);
Steven Rostedt35e8e302008-05-12 21:20:42 +020044}
45
Ingo Molnar4e655512008-05-12 21:20:52 +020046static void
47wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
Ingo Molnar57422792008-05-12 21:20:51 +020048{
49 struct trace_array *tr = ctx_trace;
50 struct trace_array_cpu *data;
51 unsigned long flags;
52 long disabled;
53 int cpu;
54
55 if (!tracer_enabled)
56 return;
57
58 local_irq_save(flags);
59 cpu = raw_smp_processor_id();
60 data = tr->data[cpu];
61 disabled = atomic_inc_return(&data->disabled);
62
Ingo Molnar4e655512008-05-12 21:20:52 +020063 if (likely(disabled == 1)) {
Ingo Molnar57422792008-05-12 21:20:51 +020064 tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
Ingo Molnar4e655512008-05-12 21:20:52 +020065 ftrace_all_fair_tasks(__rq, tr, data);
66 }
Ingo Molnar57422792008-05-12 21:20:51 +020067
68 atomic_dec(&data->disabled);
69 local_irq_restore(flags);
70}
71
/*
 * Scheduler hook, called on every context switch.  Records prev's
 * comm for pid->name resolution, logs the switch, then chains to the
 * wakeup-latency tracer (a NOP when that tracer is disabled).
 */
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	wakeup_sched_switch(prev, next);
}
89
/*
 * Scheduler hook, called on every task wakeup.  Records curr's comm
 * for pid->name resolution, logs the wakeup, then chains to the
 * wakeup-latency tracer (a NOP when that tracer is disabled).
 */
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	tracing_record_cmdline(curr);

	wakeup_func(__rq, wakee, curr);

	wakeup_sched_wakeup(wakee, curr);
}
103
Ingo Molnare309b412008-05-12 21:20:51 +0200104static void sched_switch_reset(struct trace_array *tr)
Steven Rostedt35e8e302008-05-12 21:20:42 +0200105{
106 int cpu;
107
Ingo Molnar750ed1a2008-05-12 21:20:46 +0200108 tr->time_start = ftrace_now(tr->cpu);
Steven Rostedt35e8e302008-05-12 21:20:42 +0200109
110 for_each_online_cpu(cpu)
111 tracing_reset(tr->data[cpu]);
112}
113
Ingo Molnare309b412008-05-12 21:20:51 +0200114static void start_sched_trace(struct trace_array *tr)
Steven Rostedt35e8e302008-05-12 21:20:42 +0200115{
116 sched_switch_reset(tr);
117 tracer_enabled = 1;
118}
119
Ingo Molnare309b412008-05-12 21:20:51 +0200120static void stop_sched_trace(struct trace_array *tr)
Steven Rostedt35e8e302008-05-12 21:20:42 +0200121{
122 tracer_enabled = 0;
123}
124
Ingo Molnare309b412008-05-12 21:20:51 +0200125static void sched_switch_trace_init(struct trace_array *tr)
Steven Rostedt35e8e302008-05-12 21:20:42 +0200126{
127 ctx_trace = tr;
128
129 if (tr->ctrl)
130 start_sched_trace(tr);
131}
132
Ingo Molnare309b412008-05-12 21:20:51 +0200133static void sched_switch_trace_reset(struct trace_array *tr)
Steven Rostedt35e8e302008-05-12 21:20:42 +0200134{
135 if (tr->ctrl)
136 stop_sched_trace(tr);
137}
138
139static void sched_switch_trace_ctrl_update(struct trace_array *tr)
140{
141 /* When starting a new trace, reset the buffers */
142 if (tr->ctrl)
143 start_sched_trace(tr);
144 else
145 stop_sched_trace(tr);
146}
147
/* Plugin descriptor registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
158
159__init static int init_sched_switch_trace(void)
160{
161 return register_tracer(&sched_switch_trace);
162}
163device_initcall(init_sched_switch_trace);