/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;

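/*
 * Record one context switch into the trace buffer.  The cmdlines of
 * both tasks are always recorded, keeping the pid -> comm mapping
 * fresh, but an event is written only while the tracer is enabled.
 * The per-cpu "disabled" counter acts as a recursion guard: we only
 * trace when our increment took it from 0 to 1.
 */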
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

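/*
 * Marker probe for the "kernel_sched_schedule" marker.  The arguments
 * arrive as a va_list laid out by the marker's format string, so they
 * must be consumed in exactly that order.
 */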
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	/*
	 * Even though this probe calls only the local switch function,
	 * the probe's private data pointer still has to be passed through.
	 */
	sched_switch_func(probe_data, __rq, prev, next);
}

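/*
 * Record a wakeup event.  Mirrors sched_switch_func(), with the same
 * per-cpu recursion guard; note that here the tracer_enabled check
 * comes first, so the cmdline is only recorded while tracing is on.
 */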
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
	    struct task_struct *curr)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

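/*
 * Marker probe for the "kernel_sched_wakeup" and
 * "kernel_sched_wakeup_new" markers; both use the same format string,
 * so a single parser serves both.
 */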
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
{
	struct task_struct *curr;
	struct task_struct *task;
	struct rq *__rq;

	if (likely(!tracer_enabled))
		return;

	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* now get the meat: "rq %p task %p rq->curr %p" */
	__rq = va_arg(*args, typeof(__rq));
	task = va_arg(*args, typeof(task));
	curr = va_arg(*args, typeof(curr));

	tracing_record_cmdline(task);
	tracing_record_cmdline(curr);

	wakeup_func(probe_data, __rq, task, curr);
}

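/* Reset the trace buffers of every online cpu and restart the clock. */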
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

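/*
 * Hook the three scheduler markers.  The format strings must match the
 * marker definitions in the scheduler exactly, since the callbacks
 * above parse their va_lists positionally.  On failure, any probe
 * already registered is unwound before the error is returned.
 */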
static int tracing_sched_register(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
			"prev_pid %d next_pid %d prev_state %ld "
			"## rq %p prev %p next %p",
			sched_switch_callback,
			&ctx_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
	return ret;
}

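/* Tear down all three marker probes; the inverse of tracing_sched_register(). */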
static void tracing_sched_unregister(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
}

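/*
 * The marker probes are reference counted via sched_ref: the first
 * user registers them, the last one to go away unregisters them
 * (atomic_dec_and_test() returns true only when the count hits zero).
 */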
static void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}

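/*
 * Public entry points for other tracers that need the pid -> comm
 * mapping kept up to date while they run.
 */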
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

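/*
 * Note the ordering: the tracer is flagged enabled only after the
 * buffers are reset and the probes are in place, and is disabled
 * again before they are torn down.
 */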
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}

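/*
 * Tracer interface: tr->ctrl reflects whether tracing is currently
 * switched on from user space, so init/reset/ctrl_update just start
 * or stop the trace to match it.
 */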
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

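/* The tracer as registered with the core, selectable as "sched_switch". */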
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

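/*
 * If someone is already holding a sched_ref by the time this initcall
 * runs, register the marker probes up front, then register the tracer
 * itself.
 */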
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);