/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
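
/*
 * Record one context-switch event into the current CPU's trace buffer.
 * The per-CPU "disabled" counter protects against recursion: the entry
 * is only written when this is the sole active user on this CPU.
 */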
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(prev);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_switch_trace(tr, data, prev, next, flags);
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
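
/*
 * Record a wakeup event (wakee being woken by curr) into the current
 * CPU's trace buffer, using the same recursion protection as the
 * context-switch probe above.
 */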
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
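
/*
 * Entry point called on a context switch: trace the switch locally,
 * then hand the event on to the wakeup-latency tracer.
 */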
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}
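
/*
 * Entry point called when a task is woken up: trace the wakeup locally,
 * then hand the event on to the wakeup-latency tracer.
 */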
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
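
/*
 * Record an ad-hoc "special" entry carrying three caller-supplied
 * values; handy for one-off debugging annotations in the trace.
 */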
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
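
/*
 * Restart the trace: take a fresh start timestamp and clear the buffer
 * of every online CPU.
 */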
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
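
/* Reset the buffers and let the probes start recording. */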
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
}
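
/*
 * Bind this tracer to its trace_array and start tracing right away if
 * the control flag is already set.
 */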
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
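
/* Tracer definition registered with the tracing core at boot. */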
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);