/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"
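
/*
 * The wakeup tracers measure scheduling latency: the time between a
 * task being woken (the sched_wakeup tracepoints, probe_wakeup()) and
 * that task actually getting the CPU (sched_switch,
 * probe_wakeup_sched_switch()).  The "wakeup" tracer follows the
 * highest-priority waking task in the system; "wakeup_rt" restricts
 * that to RT tasks.  A new maximum is recorded via update_max_tr().
 */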

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

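/*
 * wakeup_lock protects wakeup_task/wakeup_cpu/wakeup_prio.  It is a
 * raw arch_spinlock_t rather than a normal spinlock because these
 * probes run from inside the scheduler with interrupts off; the raw
 * lock bypasses lockdep and the tracing hooks and so cannot recurse
 * back into this code.
 */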
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

static int save_lat_flag;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);

	trace_function(tr, ip, parent_ip, flags, pc);

	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);
 out_enable:
	ftrace_preempt_enable(resched);
}

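/*
 * Registered with the function tracer in start_wakeup_tracer(), so
 * wakeup_tracer_call() runs for every traced function while this
 * tracer is active.
 */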
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

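/*
 * When function tracing is on, wakeup_tracer_call() only records on
 * the CPU the wakeup task occupies, so track migrations to keep
 * wakeup_current_cpu accurate.
 */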
static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void notrace
probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

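	/*
	 * preempt_timestamp was stamped in probe_wakeup() when this
	 * task was woken; the delta from then to now is the wakeup
	 * latency.
	 */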
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

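/*
 * Callers must hold wakeup_lock with interrupts disabled; see
 * wakeup_reset() for the locked wrapper.
 */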
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

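/*
 * Probe for the sched_wakeup/sched_wakeup_new tracepoints.  Arm the
 * tracer on this wakeup only if the woken task outranks both current
 * and any task we are already timing, then stamp preempt_timestamp
 * for the latency measurement.
 */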
static void
probe_wakeup(struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task);
}

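/*
 * Common init for "wakeup" and "wakeup_rt": force the latency trace
 * format (the old flag is restored in wakeup_tracer_reset()) and
 * clear the previous max so a new one can be recorded.
 */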
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

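/*
 * Example usage from userspace (a sketch; assumes debugfs is mounted
 * at /sys/kernel/debug):
 *
 *	# echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	... run an RT workload ...
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	# cat /sys/kernel/debug/tracing/trace
 */
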
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);