/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

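/*
 * How these selftests get run (a sketch from background knowledge, not
 * from this file): with startup tests enabled in Kconfig,
 * register_tracer() invokes a tracer's ->selftest() callback before
 * making the tracer available. A tracer wires up one of the functions
 * below in its struct tracer definition, roughly like:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 * Treat the exact field and config names as assumptions; trace.h is
 * the authoritative reference.
 */
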
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
		return 1;
	}
	return 0;
}

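/*
 * Consume every event queued on one cpu of the ring buffer and check
 * that each entry has a type this selftest knows how to produce.
 * Returns 0 if the buffer looks sane; on the first bad entry it
 * disables tracing and returns -1.
 */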
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we
		 * loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

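/*
 * A note on the function traced below: DYN_FTRACE_TEST_NAME is expected
 * to resolve to a real, mcount-instrumented function defined in a
 * separate object file (in this era, trace_selftest_dynamic.c, which
 * the Makefile builds with -pg even though the rest of the tracing code
 * is compiled without instrumentation). That detail is recalled from
 * the surrounding tree, not stated in this file, so treat the exact
 * file name as an assumption.
 */
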
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
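	/*
	 * On ppc64, for instance, text symbols can carry a leading dot;
	 * the '*' glob keeps ftrace_set_filter() matching the name
	 * regardless of any such prefix.
	 */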

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer from which the
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


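/*
 * The irqsoff selftest works by generating a known latency: interrupts
 * are kept disabled for roughly 100us, which the tracer should record
 * as a new max latency in max_tr. The test then checks that both the
 * live buffer and the max buffer are sane and that the max buffer is
 * not empty.
 */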
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

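/*
 * The combined preempt+irqs-off selftest below disables both preemption
 * and interrupts, then re-enables preemption before interrupts so the
 * critical section only fully ends on the last enable. It measures
 * twice, resetting the recorded max latency in between, to make sure a
 * second latency is also picked up.
 */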
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the caller know we now have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

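/*
 * The wakeup tracer records the longest delay between the wakeup of a
 * high-priority task and the moment that task actually gets the CPU.
 * The test below provokes such a latency by creating an RT kthread,
 * letting it go to sleep, and then waking it while tracing is active.
 */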
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

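/*
 * Background, not stated in this file: the hw-branch tracer logs
 * branches recorded by the CPU (the branch trace store on x86). Its
 * per-cpu hardware buffers are only drained into the trace buffer when
 * an iterator is opened, which is why the test below opens a dummy
 * iterator before checking the buffer.
 */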
#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */