blob: 5671db0e1827e99b7828cf84f36569a8a8dd4b26 [file] [log] [blame]
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
12#include <linux/marker.h>
13#include <linux/ftrace.h>
14
15#include "trace.h"
16
17static struct trace_array *ctx_trace;
18static int __read_mostly tracer_enabled;
19
/*
 * Record a context-switch event (prev -> next) into the trace buffer.
 *
 * Called from the scheduler's switch path via ftrace_ctx_switch(); __rq is
 * the runqueue pointer, unused here beyond being passed through.  Interrupts
 * are disabled and a per-CPU ->disabled refcount is taken so that a nested
 * invocation on the same CPU (e.g. from an NMI-like context re-entering the
 * tracer) skips recording instead of recursing.
 */
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	/* Save the comm of the task being switched out for later decoding. */
	tracing_record_cmdline(prev);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* Only trace when we are the sole user of this CPU's buffer. */
	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
45
/*
 * Record a wakeup event (curr waking wakee) into the trace buffer.
 *
 * Mirrors ctx_switch_func(): interrupts are disabled and the per-CPU
 * ->disabled refcount guards against recursive tracing on this CPU.
 * __rq is the runqueue pointer, passed through unused.
 */
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	/* Save the waker's comm so PIDs can be resolved in the output. */
	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* Only trace when we are the sole user of this CPU's buffer. */
	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
71
/*
 * Entry point invoked from the scheduler on every context switch.
 *
 * Records the switch for the sched_switch tracer, then chains the event to
 * the wakeup-latency tracer.
 */
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}
87
/*
 * Entry point invoked from the scheduler when a task is woken up.
 *
 * Records the wakeup for the sched_switch tracer, then chains the event to
 * the wakeup-latency tracer.
 */
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
99
Ingo Molnar88a42162008-05-12 21:20:53 +0200100void
101ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
102{
103 struct trace_array *tr = ctx_trace;
104 struct trace_array_cpu *data;
105 unsigned long flags;
106 long disabled;
107 int cpu;
108
109 if (!tracer_enabled)
110 return;
111
112 local_irq_save(flags);
113 cpu = raw_smp_processor_id();
114 data = tr->data[cpu];
115 disabled = atomic_inc_return(&data->disabled);
116
117 if (likely(disabled == 1))
118 __trace_special(tr, data, arg1, arg2, arg3);
119
120 atomic_dec(&data->disabled);
121 local_irq_restore(flags);
122}
123
/*
 * Reset the trace buffers of every online CPU and stamp the start time,
 * so a fresh trace begins from a clean state.
 */
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
133
/*
 * Begin tracing: clear out stale buffer contents first, then flip the
 * enable flag so the scheduler hooks start recording.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
}
139
/* Stop tracing: the scheduler hooks become NOPs; buffers are preserved. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
}
144
Ingo Molnare309b412008-05-12 21:20:51 +0200145static void sched_switch_trace_init(struct trace_array *tr)
Steven Rostedt35e8e302008-05-12 21:20:42 +0200146{
147 ctx_trace = tr;
148
149 if (tr->ctrl)
150 start_sched_trace(tr);
151}
152
Ingo Molnare309b412008-05-12 21:20:51 +0200153static void sched_switch_trace_reset(struct trace_array *tr)
Steven Rostedt35e8e302008-05-12 21:20:42 +0200154{
155 if (tr->ctrl)
156 stop_sched_trace(tr);
157}
158
159static void sched_switch_trace_ctrl_update(struct trace_array *tr)
160{
161 /* When starting a new trace, reset the buffers */
162 if (tr->ctrl)
163 start_sched_trace(tr);
164 else
165 stop_sched_trace(tr);
166}
167
/* Tracer descriptor registered with the ftrace core as "sched_switch". */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
178
/* Register the sched_switch tracer at boot (device initcall level). */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);