/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

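/*
 * Entry types the selftests know about. Anything else found in the
 * buffer is treated as corruption.
 */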
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

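/*
 * Consume every event on one CPU's buffer, validating each entry.
 * Bails out if we loop more times than the buffer could possibly
 * hold, or if an entry of an unknown type shows up.
 */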
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we
		 * loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

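/* Complain when a tracer's init() callback fails. */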
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

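/*
 * A set of counting probes: each function below just increments its
 * counter, so the tests can verify exactly which probes fired and
 * how many times.
 */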
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func = trace_selftest_test_global_func,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

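/*
 * Register three filtered ops and one global ops, call the two test
 * functions, and check that each counter matches the number of times
 * its probe should have fired. The second half repeats the check with
 * a dynamically allocated ops added to the mix.
 */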
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this
 * selftest was borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
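/*
 * Disable interrupts for ~100us with the irqsoff tracer running and
 * verify that the max-latency buffer recorded the section.
 */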
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer first to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max irqs-off latency is being
	 * recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
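/*
 * Same idea as the irqsoff test: disable preemption for ~100us and
 * verify that the max-latency buffer recorded the section.
 */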
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer first to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max preempt-off latency is
	 * being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
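/*
 * Combined test: disable both preemption and interrupts, re-enable
 * them in the opposite order, and check that the max-latency buffer
 * recorded the section. Run twice to exercise the max buffer flip.
 */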
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer first to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max irqs/preempt-off latency
	 * is being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif /* CONFIG_NOP_TRACER */

#ifdef CONFIG_SCHED_TRACER
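/*
 * Helper thread for the wakeup test below: runs as an RT task,
 * signals that its priority is set, then sleeps until woken.
 */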
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; it doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know our new prio is set */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

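/*
 * Measure the wakeup latency of a high-priority RT thread: wake the
 * helper thread with the wakeup tracer running and verify that the
 * max-latency buffer captured the event.
 */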
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
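/*
 * Sanity test: record context switches for 1/10 of a second and
 * verify that the buffer is non-empty and uncorrupted.
 */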
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
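/*
 * Sanity test: run the branch tracer for 1/10 of a second and
 * verify that the buffer is non-empty and uncorrupted.
 */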
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */