/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

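/*
 * Entries pulled back out of the ring buffer must be of a type the
 * selftests can legitimately produce; anything else means the buffer
 * has been corrupted.
 */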
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}

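/*
 * Consume every event queued on one CPU's ring buffer, validating
 * each entry. On the first invalid entry, tracing is shut off
 * entirely, since the buffer can no longer be trusted.
 */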
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

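/*
 * STR() stringifies its argument only after a round of macro
 * expansion, so STR(DYN_FTRACE_TEST_NAME) yields the expanded
 * function name as a string (e.g. "trace_selftest_dynamic_test_func",
 * assuming the usual definition in trace.h) rather than the literal
 * "DYN_FTRACE_TEST_NAME" that a bare #x would produce.
 */
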
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* func is passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
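/*
 * Verify the irqsoff tracer: disable interrupts for a 100us window
 * with tracing enabled, then sanity-check both buffers and make sure
 * the max-latency snapshot actually captured entries.
 */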
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
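/*
 * Same procedure as the irqsoff test, but the 100us window is spent
 * with preemption disabled instead of interrupts.
 */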
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
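/*
 * Combined test: disable preemption, then interrupts, across the
 * 100us window, re-enable preemption before interrupts, and check
 * both buffers; the whole sequence is run twice.
 */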
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
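/*
 * Helper kthread for the wakeup test below: raise itself to an RT
 * priority, signal readiness, then sleep until the test wakes it.
 */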
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known that we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

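/*
 * Verify the wakeup-latency tracer: put the RT helper thread to
 * sleep, wake it, and check that the wakeup made it into both the
 * live buffer and the max-latency snapshot.
 */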
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
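/*
 * Verify the context-switch tracer: sleep with tracing enabled and
 * check that the resulting sched events showed up in the buffer.
 */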
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
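/*
 * Verify the sysprof tracer: let it sample for 1/10 of a second and
 * sanity-check the buffer; the entry count itself is not checked.
 */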
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
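/*
 * Verify the branch tracer: trace for 1/10 of a second and
 * sanity-check the buffer, as with the sysprof test above.
 */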
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */