/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

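/*
 * ctx_trace is the trace_array this tracer records into.  sched_ref
 * counts users of the scheduler marker probes: they are registered on
 * the 0 -> 1 transition and unregistered again on 1 -> 0.
 */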
static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;

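/*
 * Record one context switch into the per-cpu trace buffer.  The
 * data->disabled counter is a recursion guard: the event is written
 * only when we are the sole active user on this cpu.
 */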
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

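/*
 * Marker probe for kernel_sched_schedule.  The arguments arrive as a
 * va_list laid out by the marker format string
 * "prev_pid %d next_pid %d prev_state %ld ## rq %p prev %p next %p".
 */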
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	tracing_record_cmdline(prev);

	/*
	 * probe_data is the &ctx_trace pointer that was registered with
	 * the marker; pass it through so sched_switch_func can find the
	 * trace_array to record into.
	 */
	sched_switch_func(probe_data, __rq, prev, next);
}

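/*
 * Record a wakeup event into the per-cpu trace buffer; same recursion
 * guard via data->disabled as in sched_switch_func.
 */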
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
			struct task_struct *curr)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

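/*
 * Marker probe for kernel_sched_wakeup and kernel_sched_wakeup_new.
 * The va_list is laid out by the marker format string
 * "pid %d state %ld ## rq %p task %p rq->curr %p".
 */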
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
{
	struct task_struct *curr;
	struct task_struct *task;
	struct rq *__rq;

	if (likely(!tracer_enabled))
		return;

	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* now get the meat: "rq %p task %p rq->curr %p" */
	__rq = va_arg(*args, typeof(__rq));
	task = va_arg(*args, typeof(task));
	curr = va_arg(*args, typeof(curr));

	tracing_record_cmdline(task);
	tracing_record_cmdline(curr);

	wakeup_func(probe_data, __rq, task, curr);
}

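/*
 * Place three arbitrary values into the trace as a "special" entry;
 * handy for ad-hoc debugging from anywhere in the kernel.
 */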
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

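/* Restart the trace clock and clear the per-cpu buffers. */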
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

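/*
 * Attach the probes to the three scheduler markers, unwinding any
 * probes already attached if a later registration fails.
 */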
static int tracing_sched_register(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
			"prev_pid %d next_pid %d prev_state %ld "
			"## rq %p prev %p next %p",
			sched_switch_callback,
			&ctx_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
	return ret;
}

static void tracing_sched_unregister(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
}

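/*
 * Start/stop are refcounted so that multiple tracers can share the
 * probes: the markers are registered on the first start and
 * unregistered again on the last stop.
 */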
void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}

static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	atomic_inc(&trace_record_cmdline_enabled);
	tracer_enabled = 1;
	tracing_start_sched_switch();
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch();
	atomic_dec(&trace_record_cmdline_enabled);
	tracer_enabled = 0;
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

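/*
 * If a user of the scheduler probes raised sched_ref before this
 * initcall ran, the registration may not have taken effect yet, so
 * make sure the probes are in place before exposing the tracer.
 */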
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);