| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 1 | /* Include in trace.c */ | 
 | 2 |  | 
 | 3 | #include <linux/kthread.h> | 
| Ingo Molnar | c7aafc5 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 4 | #include <linux/delay.h> | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 5 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 6 | static inline int trace_valid_entry(struct trace_entry *entry) | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 7 | { | 
 | 8 | 	switch (entry->type) { | 
 | 9 | 	case TRACE_FN: | 
 | 10 | 	case TRACE_CTX: | 
| Ingo Molnar | 5742279 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 11 | 	case TRACE_WAKE: | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 12 | 	case TRACE_CONT: | 
| Steven Rostedt | 06fa75a | 2008-05-12 21:20:54 +0200 | [diff] [blame] | 13 | 	case TRACE_STACK: | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 14 | 	case TRACE_PRINT: | 
| Steven Rostedt | 06fa75a | 2008-05-12 21:20:54 +0200 | [diff] [blame] | 15 | 	case TRACE_SPECIAL: | 
| Steven Rostedt | 80e5ea4 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 16 | 	case TRACE_BRANCH: | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 17 | 		return 1; | 
 | 18 | 	} | 
 | 19 | 	return 0; | 
 | 20 | } | 
 | 21 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 22 | static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 23 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 24 | 	struct ring_buffer_event *event; | 
 | 25 | 	struct trace_entry *entry; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 26 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 27 | 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) { | 
 | 28 | 		entry = ring_buffer_event_data(event); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 29 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 30 | 		if (!trace_valid_entry(entry)) { | 
| Ingo Molnar | c7aafc5 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 31 | 			printk(KERN_CONT ".. invalid entry %d ", | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 32 | 				entry->type); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 33 | 			goto failed; | 
 | 34 | 		} | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 35 | 	} | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 36 | 	return 0; | 
 | 37 |  | 
 | 38 |  failed: | 
| Steven Rostedt | 08bafa0 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 39 | 	/* disable tracing */ | 
 | 40 | 	tracing_disabled = 1; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 41 | 	printk(KERN_CONT ".. corrupted trace buffer .. "); | 
 | 42 | 	return -1; | 
 | 43 | } | 
 | 44 |  | 
 | 45 | /* | 
 | 46 |  * Test the trace buffer to see if all the elements | 
 | 47 |  * are still sane. | 
 | 48 |  */ | 
 | 49 | static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | 
 | 50 | { | 
| Steven Rostedt | 30afdcb | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 51 | 	unsigned long flags, cnt = 0; | 
 | 52 | 	int cpu, ret = 0; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 53 |  | 
| Steven Rostedt | 30afdcb | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 54 | 	/* Don't allow flipping of max traces now */ | 
| Steven Rostedt | d51ad7a | 2008-11-15 15:48:29 -0500 | [diff] [blame] | 55 | 	local_irq_save(flags); | 
| Steven Rostedt | 30afdcb | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 56 | 	__raw_spin_lock(&ftrace_max_lock); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 57 |  | 
 | 58 | 	cnt = ring_buffer_entries(tr->buffer); | 
 | 59 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 60 | 	for_each_possible_cpu(cpu) { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 61 | 		ret = trace_test_buffer_cpu(tr, cpu); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 62 | 		if (ret) | 
 | 63 | 			break; | 
 | 64 | 	} | 
| Steven Rostedt | 30afdcb | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 65 | 	__raw_spin_unlock(&ftrace_max_lock); | 
| Steven Rostedt | d51ad7a | 2008-11-15 15:48:29 -0500 | [diff] [blame] | 66 | 	local_irq_restore(flags); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 67 |  | 
 | 68 | 	if (count) | 
 | 69 | 		*count = cnt; | 
 | 70 |  | 
 | 71 | 	return ret; | 
 | 72 | } | 
 | 73 |  | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 74 | static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) | 
 | 75 | { | 
 | 76 | 	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n", | 
 | 77 | 		trace->name, init_ret); | 
 | 78 | } | 
| Steven Rostedt | 606576c | 2008-10-06 19:06:12 -0400 | [diff] [blame] | 79 | #ifdef CONFIG_FUNCTION_TRACER | 
| Steven Rostedt | 77a2b37 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 80 |  | 
 | 81 | #ifdef CONFIG_DYNAMIC_FTRACE | 
 | 82 |  | 
| Steven Rostedt | 77a2b37 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 83 | #define __STR(x) #x | 
 | 84 | #define STR(x) __STR(x) | 
| Steven Rostedt | 77a2b37 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 85 |  | 
/*
 * Test dynamic code modification and ftrace filters.
 *
 * Runs with the function tracer already verified: installs a filter
 * for DYN_FTRACE_TEST_NAME only, checks that nothing else is traced,
 * then calls the filtered function once and verifies exactly one
 * entry lands in the buffer. The filter is cleared and the global
 * ftrace/tracer enables are restored before returning.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	/* save the global enables so they can be restored on exit */
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer: the filter is active
	 * and DYN_FTRACE_TEST_NAME has not been called since init */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item: the single func() call above */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
 | 170 | #else | 
 | 171 | # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) | 
 | 172 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order. On success, chains into the
 * dynamic-ftrace filter test (a no-op without CONFIG_DYNAMIC_FTRACE).
 * If anything fails, ftrace is killed entirely.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	/* save the global enables so they can be restored on exit */
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* an empty buffer means function entries were never recorded */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
| Steven Rostedt | 606576c | 2008-10-06 19:06:12 -0400 | [diff] [blame] | 229 | #endif /* CONFIG_FUNCTION_TRACER */ | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 230 |  | 
 | 231 | #ifdef CONFIG_IRQSOFF_TRACER | 
/*
 * Verify the irqsoff tracer: disable interrupts for ~100us and
 * check that both the live buffer and the max-latency snapshot
 * contain sane entries afterwards.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the recorded max latency across the test */
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	/* the irqs-off section above must have produced entries */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
 | 270 | #endif /* CONFIG_IRQSOFF_TRACER */ | 
 | 271 |  | 
 | 272 | #ifdef CONFIG_PREEMPT_TRACER | 
/*
 * Verify the preemptoff tracer: disable preemption for ~100us and
 * check that both the live buffer and the max-latency snapshot
 * contain sane entries afterwards.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the recorded max latency across the test */
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	/* the preempt-off section above must have produced entries */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
 | 324 | #endif /* CONFIG_PREEMPT_TRACER */ | 
 | 325 |  | 
 | 326 | #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER) | 
/*
 * Verify the combined preempt+irqsoff tracer. Runs two passes:
 * first disabling preemption then interrupts, then the reverse
 * ordering, checking the live buffer and the max-latency snapshot
 * after each pass. Tracing is restarted on every early-exit path
 * so the system is left running.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the recorded max latency across the test */
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		/* restart tracing before bailing out */
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		/* restart tracing before bailing out */
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		/* restart tracing before bailing out */
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
 | 419 | #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */ | 
 | 420 |  | 
| Steven Noonan | fb1b6d8 | 2008-09-19 03:06:43 -0700 | [diff] [blame] | 421 | #ifdef CONFIG_NOP_TRACER | 
 | 422 | int | 
 | 423 | trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) | 
 | 424 | { | 
 | 425 | 	/* What could possibly go wrong? */ | 
 | 426 | 	return 0; | 
 | 427 | } | 
 | 428 | #endif | 
 | 429 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 430 | #ifdef CONFIG_SCHED_TRACER | 
 | 431 | static int trace_wakeup_test_thread(void *data) | 
 | 432 | { | 
| Steven Rostedt | 05bd68c | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 433 | 	/* Make this a RT thread, doesn't need to be too high */ | 
 | 434 | 	struct sched_param param = { .sched_priority = 5 }; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 435 | 	struct completion *x = data; | 
 | 436 |  | 
| Steven Rostedt | 05bd68c | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 437 | 	sched_setscheduler(current, SCHED_FIFO, ¶m); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 438 |  | 
 | 439 | 	/* Make it know we have a new prio */ | 
 | 440 | 	complete(x); | 
 | 441 |  | 
 | 442 | 	/* now go to sleep and let the test wake us up */ | 
 | 443 | 	set_current_state(TASK_INTERRUPTIBLE); | 
 | 444 | 	schedule(); | 
 | 445 |  | 
 | 446 | 	/* we are awake, now wait to disappear */ | 
 | 447 | 	while (!kthread_should_stop()) { | 
 | 448 | 		/* | 
 | 449 | 		 * This is an RT task, do short sleeps to let | 
 | 450 | 		 * others run. | 
 | 451 | 		 */ | 
 | 452 | 		msleep(100); | 
 | 453 | 	} | 
 | 454 |  | 
 | 455 | 	return 0; | 
 | 456 | } | 
 | 457 |  | 
/*
 * Verify the wakeup-latency tracer: spawn an RT kthread, let it go
 * to sleep, wake it, and check that the wakeup latency was traced
 * into both the live buffer and the max-latency snapshot.
 */
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the recorded max latency across the test */
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;	/* signalled once the kthread is RT */
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	/* the traced wakeup above must have produced entries */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
 | 531 | #endif /* CONFIG_SCHED_TRACER */ | 
 | 532 |  | 
 | 533 | #ifdef CONFIG_CONTEXT_SWITCH_TRACER | 
/*
 * Verify the sched_switch tracer: trace for 1/10 second (the msleep
 * itself forces context switches) and check the buffer is sane and
 * non-empty.
 */
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* the msleep must have recorded at least one context switch */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
 | 563 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 
| Ingo Molnar | a6dd24f | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 564 |  | 
 | 565 | #ifdef CONFIG_SYSPROF_TRACER | 
 | 566 | int | 
 | 567 | trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | 
 | 568 | { | 
 | 569 | 	unsigned long count; | 
 | 570 | 	int ret; | 
 | 571 |  | 
 | 572 | 	/* start the tracing */ | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 573 | 	ret = trace->init(tr); | 
 | 574 | 	if (ret) { | 
 | 575 | 		warn_failed_init_tracer(trace, ret); | 
 | 576 | 		return 0; | 
 | 577 | 	} | 
 | 578 |  | 
| Ingo Molnar | a6dd24f | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 579 | 	/* Sleep for a 1/10 of a second */ | 
 | 580 | 	msleep(100); | 
 | 581 | 	/* stop the tracing. */ | 
| Steven Rostedt | bbf5b1a | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 582 | 	tracing_stop(); | 
| Ingo Molnar | a6dd24f | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 583 | 	/* check the trace buffer */ | 
 | 584 | 	ret = trace_test_buffer(tr, &count); | 
 | 585 | 	trace->reset(tr); | 
| Steven Rostedt | bbf5b1a | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 586 | 	tracing_start(); | 
| Ingo Molnar | a6dd24f | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 587 |  | 
| Ingo Molnar | a6dd24f | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 588 | 	return ret; | 
 | 589 | } | 
 | 590 | #endif /* CONFIG_SYSPROF_TRACER */ | 
| Steven Rostedt | 80e5ea4 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 591 |  | 
 | 592 | #ifdef CONFIG_BRANCH_TRACER | 
/*
 * Verify the branch tracer: trace for 1/10 second and check that the
 * buffer entries are sane. Note that unlike most selftests this one
 * does not require the buffer to be non-empty — only uncorrupted.
 */
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
 | 617 | #endif /* CONFIG_BRANCH_TRACER */ |