/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}

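/*
 * Consume every event queued on @cpu of @tr's ring buffer and check
 * that each entry is of a type this selftest knows about. Returns 0
 * on success; on corruption it disables tracing and returns -1.
 */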
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we
		 * loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
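
/*
 * Two-level stringification: STR() expands its argument before #x
 * stringizes it, so STR(DYN_FTRACE_TEST_NAME) yields the *expanded*
 * function name as a string literal, not "DYN_FTRACE_TEST_NAME".
 * For example, if the macro expanded to a hypothetical
 * my_dyn_test_func:
 *
 *	"*" STR(DYN_FTRACE_TEST_NAME)  =>  "*" "my_dyn_test_func"
 *	                               =>  "*my_dyn_test_func"
 */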

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
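/*
 * Selftest for the irqsoff tracer: start the tracer, disable
 * interrupts for a short, known window, then verify that both the
 * live buffer and the max-latency snapshot recorded entries.
 */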
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
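/*
 * Selftest for the preemptoff tracer: start the tracer, disable
 * preemption for a short, known window, then verify that both the
 * live buffer and the max-latency snapshot recorded entries.
 */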
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
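/*
 * Selftest for the preemptirqsoff tracer: exercise both orderings of
 * preempt_disable()/local_irq_disable() and verify that the combined
 * tracer records the overlapping critical sections in both cases.
 */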
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
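/*
 * Helper thread for the wakeup tracer selftest: it raises itself to
 * RT priority, signals the tester via @data (a completion), then
 * sleeps until it is woken and eventually told to stop.
 */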
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; it doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the tester know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

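/*
 * Selftest for the wakeup tracer: create an RT thread, let it go to
 * sleep, wake it with the tracer running, and verify that the wakeup
 * latency was recorded in the max-latency buffer.
 */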
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
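/*
 * Selftest for the context-switch tracer: run it over a short sleep
 * and check that context-switch events show up in the trace buffer.
 */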
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
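/*
 * Selftest for the sysprof tracer: run it over a short sleep and make
 * sure the trace buffer is still sane afterwards. Note it does not
 * require any entries to have been recorded.
 */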
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
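/*
 * Selftest for the branch tracer: run it over a short sleep and make
 * sure the trace buffer is still sane afterwards. Note it does not
 * require any entries to have been recorded.
 */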
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */