/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

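/*
 * tracing_cpu is non-zero while the CPU is timing an irqs/preempt-off
 * critical section, so nested and foreign-CPU events can be ignored
 * cheaply.
 */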
static DEFINE_PER_CPU(int, tracing_cpu);

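/* Serializes updates of the max-latency snapshot in check_critical_timing() */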
static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

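/* Which critical sections to time; set by each tracer's init function */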
static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
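/*
 * Toggle the display-graph option: tear the tracer down in its old
 * mode, clear the per-cpu state and the max latency, then restart it
 * in the new mode.
 */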
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

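/*
 * Graph-mode entry hook: record a function entry only while this CPU
 * is inside a critical section with interrupts actually disabled.
 */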
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return 0;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else
		ret = 0;

	atomic_dec(&data->disabled);
	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}

	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	u32 flags = GRAPH_TRACER_FLAGS;

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		flags |= TRACE_GRAPH_PRINT_DURATION;
	else
		flags |= TRACE_GRAPH_PRINT_ABS_TIME;

	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, flags);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph()) {
		struct trace_iterator *iter = s->private;
		u32 flags = GRAPH_TRACER_FLAGS;

		if (trace_flags & TRACE_ITER_LATENCY_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return;

			print_trace_header(s, iter);
			flags |= TRACE_GRAPH_PRINT_DURATION;
		} else
			flags |= TRACE_GRAPH_PRINT_ABS_TIME;

		print_graph_headers_flags(s, flags);
	} else
		trace_default_header(s);
}

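/*
 * Emit a matching entry/return pair with identical timestamps (a
 * zero-duration call) so that a plain function hit still shows up
 * in the graph output.
 */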
static void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

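/*
 * Record a function event in whichever format is active: the TRACE_FN
 * event normally, or a synthesized graph pair when display-graph is set.
 */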
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (!is_graph())
		trace_function(tr, ip, parent_ip, flags, pc);
	else {
		trace_graph_function(tr, parent_ip, flags, pc);
		trace_graph_function(tr, ip, flags, pc);
	}
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

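/*
 * Called when a critical section ends: compute how long irqs/preemption
 * were off and, if this beats the current maximum (checked again under
 * max_trace_lock), snapshot the trace via update_max_tr_single().
 */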
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

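/*
 * Mark the start of a critical section: timestamp it, record the start
 * address, and flag this CPU as busy tracing in tracing_cpu.
 */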
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

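/*
 * Mark the end of a critical section and hand off to
 * check_critical_timing() to decide whether it is a new maximum.
 */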
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to stop the measurement (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

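/*
 * With CONFIG_PROVE_LOCKING, lockdep owns the trace_hardirqs_*() hooks
 * and calls back into time_hardirqs_on/off() here; without it, this
 * file supplies the hooks (and lockdep's stubs) itself.
 */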
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

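/*
 * Register either the plain function-trace callback or the graph
 * entry/return pair, depending on the display-graph option.
 */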
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

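/*
 * Common setup: force the latency output format (restoring the user's
 * flag on reset), clear the max latency, and start the tracer.
 */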
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr, is_graph());

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
	.open           = irqsoff_trace_open,
	.close          = irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);