/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

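/*
 * The wakeup tracer records the worst-case latency between the wakeup
 * of the highest-priority real-time task and the moment that task is
 * actually scheduled in.  Only one wakeup is measured at a time: the
 * task being tracked is kept in wakeup_task below, and the trace of the
 * maximum latency seen so far is preserved via update_max_tr().
 */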
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;

static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void __wakeup_reset(struct trace_array *tr);

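/*
 * With CONFIG_FTRACE, every traced function call is funneled through
 * wakeup_tracer_call() below.  Function entries are recorded only while
 * a wakeup measurement is in progress, and only on the CPU that the
 * woken task was queued on.
 */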
#ifdef CONFIG_FTRACE
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;

	if (likely(!wakeup_task))
		return;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, data, ip, parent_ip, flags);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);

	/*
	 * To prevent recursion from the scheduler, if the
	 * resched flag was set before we entered, then
	 * don't reschedule.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

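/*
 * Probe attached to the sched_switch tracepoint.  When the task we are
 * waiting for (wakeup_task) is finally switched in, compute the delta
 * between the timestamp taken at wakeup time and now; if it exceeds the
 * threshold (or the previous maximum), save the trace snapshot with
 * update_max_tr() and reset the state for the next measurement.
 */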
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

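/*
 * __wakeup_reset() clears the per-cpu trace buffers and drops the
 * reference on the task being traced.  Callers must hold wakeup_lock;
 * wakeup_reset() below is the locking wrapper.
 */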
static void __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(tr, cpu);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

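/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints.
 * Start a new measurement when a real-time task is woken whose priority
 * beats both the currently running task and any wakeup already being
 * traced: remember the task, its CPU and priority, and record the
 * preempt_timestamp that the sched_switch probe will compare against.
 */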
static void
probe_wakeup(struct rq *rq, struct task_struct *p)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

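/*
 * start_wakeup_tracer() registers the probes on the sched_wakeup,
 * sched_wakeup_new and sched_switch tracepoints, resets the trace
 * state, hooks the function tracer and then enables the tracer.  If a
 * registration fails, the ones already made are unwound.
 */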
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	tracer_enabled = 1;

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}

static void wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_trace = tr;

	if (tr->ctrl)
		start_wakeup_tracer(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl) {
		stop_wakeup_tracer(tr);
		/* make sure we put back any tasks we are tracing */
		wakeup_reset(tr);
	}
}

static void wakeup_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_wakeup_tracer(tr);
	else
		stop_wakeup_tracer(tr);
}

static void wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_wakeup_tracer(iter->tr);
}

static void wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (iter->tr->ctrl)
		start_wakeup_tracer(iter->tr);
}

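/*
 * Tracer definition handed to register_tracer().  print_max tells the
 * tracing core to report from the saved maximum-latency snapshot rather
 * than the live trace buffer.
 */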
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.ctrl_update	= wakeup_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);