/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

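	/*
	 * Note: ring_buffer_consume() is a destructive read; every event
	 * checked here is removed from the buffer. That is why the caller
	 * below records the entry count with ring_buffer_entries() before
	 * running this check.
	 */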
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
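
/*
 * A small illustration of why the stringify helper is two macros deep:
 * the extra level makes the preprocessor expand the argument before the
 * '#' operator stringifies it. Assuming, for example's sake, that
 * DYN_FTRACE_TEST_NAME is defined elsewhere as
 * trace_selftest_dynamic_test_func:
 *
 *	STR(DYN_FTRACE_TEST_NAME)   -> "trace_selftest_dynamic_test_func"
 *	__STR(DYN_FTRACE_TEST_NAME) -> "DYN_FTRACE_TEST_NAME"
 */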

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
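	/*
	 * For example, 64-bit PowerPC prefixes function entry symbols
	 * with a '.' (function descriptors), so the leading '*' glob lets
	 * the filter match ".<function name>" as well as the plain name.
	 * (The exact name depends on how DYN_FTRACE_TEST_NAME is defined.)
	 */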

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
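/*
 * Note: the stub above is a GNU C statement expression; "({ 0; })"
 * evaluates to 0, so call sites can still assign the macro's result
 * to 'ret' when CONFIG_DYNAMIC_FTRACE is not configured.
 */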
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority does not need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */