/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

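/*
 * Return 1 if the entry is one of the types the selftests expect to
 * find in a trace buffer, 0 for anything unknown.
 */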
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

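/*
 * Consume every event queued on the given CPU and check that each
 * entry has a known type; one bad entry marks the whole buffer as
 * corrupted and disables tracing.
 */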
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

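/*
 * The two-step expansion lets STR() stringify the *value* its argument
 * expands to (the real name of the test function), rather than the
 * macro name DYN_FTRACE_TEST_NAME itself.
 */
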
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* call through the passed-in pointer so gcc cannot optimize the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
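/*
 * Open a 100us interrupts-off window under the tracer, then verify
 * that both the live buffer and the max-latency snapshot (max_tr)
 * hold sane entries.
 */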
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
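/*
 * Same pattern as the irqsoff test, but the 100us window is opened
 * with preemption disabled instead of interrupts.
 */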
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "cannot test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
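/*
 * Combined test: run the preempt+irqs-off window twice, checking the
 * live buffer and the max-latency snapshot after each pass.
 */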
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "cannot test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test a second time, with tracing restarted */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are now running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

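/*
 * Measure wakeup latency: put the RT thread to sleep, wake it, and
 * verify that the max-latency snapshot recorded the wakeup.
 */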
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
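/*
 * The remaining selftests are simple smoke tests: start the tracer,
 * sleep 100ms, stop, and require at least one sane entry in the
 * buffer.
 */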
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */