/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

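/*
 * preempt_trace() and irq_trace() report whether the selected tracer
 * variant cares about the corresponding kind of critical section and
 * whether the CPU is currently inside one (preemption disabled,
 * respectively interrupts disabled).  Each evaluates to 0 when its
 * tracer is not configured.
 */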
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

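/*
 * check_critical_timing - called when a critical section ends.  Computes
 * how long irqs/preemption were disabled and, if the delta beats the
 * current maximum (or exceeds tracing_thresh), records the closing trace
 * entry and updates tracing_max_latency under max_trace_lock.
 */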
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

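/*
 * start_critical_timing - mark the beginning of an irqs-off/preempt-off
 * section on this CPU: take a timestamp, remember where the section
 * started and emit a function trace entry.  tracing_cpu is set so that
 * nested starts on the same CPU are ignored until the section ends.
 */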
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

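/*
 * stop_critical_timing - mark the end of the section started above:
 * clear tracing_cpu, emit a closing trace entry and let
 * check_critical_timing() decide whether this was a new maximum.
 */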
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to stop the timing while in idle */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

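/*
 * The hooks below feed irqs-off events into the tracer.  With
 * CONFIG_PROVE_LOCKING, lockdep already instruments irq state changes
 * and calls time_hardirqs_on/off() for us; otherwise this file supplies
 * its own trace_hardirqs_*() implementations (plus empty stubs for the
 * lockdep-only entry points).
 */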
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

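/*
 * start_irqsoff_tracer() registers the function tracer callback and
 * enables tracing if the global tracing state allows it;
 * stop_irqsoff_tracer() undoes both.
 */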
static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

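/*
 * Three tracer variants are built from the same machinery: "irqsoff"
 * times irqs-disabled sections, "preemptoff" times preemption-disabled
 * sections, and "preemptirqsoff" times sections where either is
 * disabled.  Each variant is only compiled in when its config option
 * is set.
 */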
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

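/*
 * Register whichever tracer variants were configured.  The register_*
 * macros above expand to no-ops for variants that are compiled out.
 */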
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);