blob: 24e6e075e6d65c517303390bba7314de39918ca2 [file] [log] [blame]
Steven Rostedt60a11772008-05-12 21:20:44 +02001/* Include in trace.c */
2
3#include <linux/kthread.h>
Ingo Molnarc7aafc52008-05-12 21:20:45 +02004#include <linux/delay.h>
Steven Rostedt60a11772008-05-12 21:20:44 +02005
Ingo Molnare309b412008-05-12 21:20:51 +02006static inline int trace_valid_entry(struct trace_entry *entry)
Steven Rostedt60a11772008-05-12 21:20:44 +02007{
8 switch (entry->type) {
9 case TRACE_FN:
10 case TRACE_CTX:
Ingo Molnar57422792008-05-12 21:20:51 +020011 case TRACE_WAKE:
Steven Rostedtdd0e5452008-08-01 12:26:41 -040012 case TRACE_CONT:
Steven Rostedt06fa75a2008-05-12 21:20:54 +020013 case TRACE_STACK:
Steven Rostedtdd0e5452008-08-01 12:26:41 -040014 case TRACE_PRINT:
Steven Rostedt06fa75a2008-05-12 21:20:54 +020015 case TRACE_SPECIAL:
Steven Rostedt80e5ea42008-11-12 15:24:24 -050016 case TRACE_BRANCH:
Steven Rostedt60a11772008-05-12 21:20:44 +020017 return 1;
18 }
19 return 0;
20}
21
Steven Rostedt3928a8a2008-09-29 23:02:41 -040022static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
Steven Rostedt60a11772008-05-12 21:20:44 +020023{
Steven Rostedt3928a8a2008-09-29 23:02:41 -040024 struct ring_buffer_event *event;
25 struct trace_entry *entry;
Steven Rostedt60a11772008-05-12 21:20:44 +020026
Steven Rostedt3928a8a2008-09-29 23:02:41 -040027 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
28 entry = ring_buffer_event_data(event);
Steven Rostedt60a11772008-05-12 21:20:44 +020029
Steven Rostedt3928a8a2008-09-29 23:02:41 -040030 if (!trace_valid_entry(entry)) {
Ingo Molnarc7aafc52008-05-12 21:20:45 +020031 printk(KERN_CONT ".. invalid entry %d ",
Steven Rostedt3928a8a2008-09-29 23:02:41 -040032 entry->type);
Steven Rostedt60a11772008-05-12 21:20:44 +020033 goto failed;
34 }
Steven Rostedt60a11772008-05-12 21:20:44 +020035 }
Steven Rostedt60a11772008-05-12 21:20:44 +020036 return 0;
37
38 failed:
Steven Rostedt08bafa02008-05-12 21:20:45 +020039 /* disable tracing */
40 tracing_disabled = 1;
Steven Rostedt60a11772008-05-12 21:20:44 +020041 printk(KERN_CONT ".. corrupted trace buffer .. ");
42 return -1;
43}
44
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 *
 * Walks every possible CPU's portion of @tr's ring buffer, validating
 * each consumed entry. If @count is non-NULL, the total number of
 * entries (sampled before consuming) is stored through it — even when
 * the walk fails. Returns 0 on success, -1 if a corrupt entry was
 * found (in which case tracing has been disabled).
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	/* report the entry count even on failure */
	if (count)
		*count = cnt;

	return ret;
}
73
Steven Rostedt606576c2008-10-06 19:06:12 -040074#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt77a2b372008-05-12 21:20:45 +020075
76#ifdef CONFIG_DYNAMIC_FTRACE
77
Steven Rostedt77a2b372008-05-12 21:20:45 +020078#define __STR(x) #x
79#define STR(x) __STR(x)
Steven Rostedt77a2b372008-05-12 21:20:45 +020080
/*
 * Test dynamic code modification and ftrace filters.
 *
 * Called from trace_selftest_startup_function() after the basic test
 * passed. @func is the traced test function, passed in by parameter so
 * gcc cannot optimize the calls away. The test sets a filter on @func,
 * verifies nothing else is traced while the filter excludes activity,
 * then checks exactly one entry appears after calling @func once.
 * Returns 0 on success, -1 on failure.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	trace->init(tr);

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
161#else
162# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
163#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 *
 * On success this chains into the dynamic-ftrace selftest (a no-op
 * unless CONFIG_DYNAMIC_FTRACE is set). If anything failed, ftrace is
 * killed entirely to avoid running with a broken tracer.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
Steven Rostedt606576c2008-10-06 19:06:12 -0400215#endif /* CONFIG_FUNCTION_TRACER */
Steven Rostedt60a11772008-05-12 21:20:44 +0200216
217#ifdef CONFIG_IRQSOFF_TRACER
218int
219trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
220{
221 unsigned long save_max = tracing_max_latency;
222 unsigned long count;
223 int ret;
224
225 /* start the tracing */
Steven Rostedt60a11772008-05-12 21:20:44 +0200226 trace->init(tr);
227 /* reset the max latency */
228 tracing_max_latency = 0;
229 /* disable interrupts for a bit */
230 local_irq_disable();
231 udelay(100);
232 local_irq_enable();
233 /* stop the tracing. */
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500234 tracing_stop();
Steven Rostedt60a11772008-05-12 21:20:44 +0200235 /* check both trace buffers */
236 ret = trace_test_buffer(tr, NULL);
237 if (!ret)
238 ret = trace_test_buffer(&max_tr, &count);
239 trace->reset(tr);
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500240 tracing_start();
Steven Rostedt60a11772008-05-12 21:20:44 +0200241
242 if (!ret && !count) {
243 printk(KERN_CONT ".. no entries found ..");
244 ret = -1;
245 }
246
247 tracing_max_latency = save_max;
248
249 return ret;
250}
251#endif /* CONFIG_IRQSOFF_TRACER */
252
253#ifdef CONFIG_PREEMPT_TRACER
/*
 * Verify the preemptoff tracer: create a short preemption-disabled
 * section and check that both the live and the max-latency trace
 * buffers recorded sane, non-empty traces.
 */
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
300#endif /* CONFIG_PREEMPT_TRACER */
301
302#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
303int
304trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
305{
306 unsigned long save_max = tracing_max_latency;
307 unsigned long count;
308 int ret;
309
Steven Rostedt769c48e2008-11-07 22:36:02 -0500310 /*
311 * Now that the big kernel lock is no longer preemptable,
312 * and this is called with the BKL held, it will always
313 * fail. If preemption is already disabled, simply
314 * pass the test. When the BKL is removed, or becomes
315 * preemptible again, we will once again test this,
316 * so keep it in.
317 */
318 if (preempt_count()) {
319 printk(KERN_CONT "can not test ... force ");
320 return 0;
321 }
322
Steven Rostedt60a11772008-05-12 21:20:44 +0200323 /* start the tracing */
Steven Rostedt60a11772008-05-12 21:20:44 +0200324 trace->init(tr);
325
326 /* reset the max latency */
327 tracing_max_latency = 0;
328
329 /* disable preemption and interrupts for a bit */
330 preempt_disable();
331 local_irq_disable();
332 udelay(100);
333 preempt_enable();
334 /* reverse the order of preempt vs irqs */
335 local_irq_enable();
336
337 /* stop the tracing. */
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500338 tracing_stop();
Steven Rostedt60a11772008-05-12 21:20:44 +0200339 /* check both trace buffers */
340 ret = trace_test_buffer(tr, NULL);
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500341 if (ret) {
342 tracing_start();
Steven Rostedt60a11772008-05-12 21:20:44 +0200343 goto out;
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500344 }
Steven Rostedt60a11772008-05-12 21:20:44 +0200345
346 ret = trace_test_buffer(&max_tr, &count);
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500347 if (ret) {
348 tracing_start();
Steven Rostedt60a11772008-05-12 21:20:44 +0200349 goto out;
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500350 }
Steven Rostedt60a11772008-05-12 21:20:44 +0200351
352 if (!ret && !count) {
353 printk(KERN_CONT ".. no entries found ..");
354 ret = -1;
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500355 tracing_start();
Steven Rostedt60a11772008-05-12 21:20:44 +0200356 goto out;
357 }
358
359 /* do the test by disabling interrupts first this time */
360 tracing_max_latency = 0;
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500361 tracing_start();
Steven Rostedt60a11772008-05-12 21:20:44 +0200362 preempt_disable();
363 local_irq_disable();
364 udelay(100);
365 preempt_enable();
366 /* reverse the order of preempt vs irqs */
367 local_irq_enable();
368
369 /* stop the tracing. */
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500370 tracing_stop();
Steven Rostedt60a11772008-05-12 21:20:44 +0200371 /* check both trace buffers */
372 ret = trace_test_buffer(tr, NULL);
373 if (ret)
374 goto out;
375
376 ret = trace_test_buffer(&max_tr, &count);
377
378 if (!ret && !count) {
379 printk(KERN_CONT ".. no entries found ..");
380 ret = -1;
381 goto out;
382 }
383
384 out:
385 trace->reset(tr);
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500386 tracing_start();
Steven Rostedt60a11772008-05-12 21:20:44 +0200387 tracing_max_latency = save_max;
388
389 return ret;
390}
391#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
392
Steven Noonanfb1b6d82008-09-19 03:06:43 -0700393#ifdef CONFIG_NOP_TRACER
/*
 * The nop tracer traces nothing by design, so there is nothing to
 * verify — always report success.
 */
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	return 0;
}
400#endif
401
Steven Rostedt60a11772008-05-12 21:20:44 +0200402#ifdef CONFIG_SCHED_TRACER
/*
 * Kthread body for the wakeup-latency selftest: promote itself to an
 * RT priority, signal readiness through the completion passed in
 * @data, then sleep until the test wakes it and finally stops it.
 */
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
429
/*
 * Verify the wakeup-latency tracer: spawn an RT kthread, wake it while
 * tracing, and check that the wakeup was recorded in both the live and
 * the max-latency trace buffers.
 */
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
498#endif /* CONFIG_SCHED_TRACER */
499
500#ifdef CONFIG_CONTEXT_SWITCH_TRACER
501int
502trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
503{
504 unsigned long count;
505 int ret;
506
507 /* start the tracing */
Steven Rostedt60a11772008-05-12 21:20:44 +0200508 trace->init(tr);
509 /* Sleep for a 1/10 of a second */
510 msleep(100);
511 /* stop the tracing. */
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500512 tracing_stop();
Steven Rostedt60a11772008-05-12 21:20:44 +0200513 /* check the trace buffer */
514 ret = trace_test_buffer(tr, &count);
515 trace->reset(tr);
Steven Rostedtbbf5b1a2008-11-07 22:36:02 -0500516 tracing_start();
Steven Rostedt60a11772008-05-12 21:20:44 +0200517
518 if (!ret && !count) {
519 printk(KERN_CONT ".. no entries found ..");
520 ret = -1;
521 }
522
523 return ret;
524}
525#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
Ingo Molnara6dd24f2008-05-12 21:20:47 +0200526
527#ifdef CONFIG_SYSPROF_TRACER
/*
 * Run the sysprof tracer for 100ms and verify that whatever it
 * recorded is well-formed. Note: unlike the other selftests, an empty
 * buffer is not treated as a failure here — only corruption is.
 */
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
547#endif /* CONFIG_SYSPROF_TRACER */
Steven Rostedt80e5ea42008-11-12 15:24:24 -0500548
549#ifdef CONFIG_BRANCH_TRACER
550int
551trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
552{
553 unsigned long count;
554 int ret;
555
556 /* start the tracing */
557 trace->init(tr);
558 /* Sleep for a 1/10 of a second */
559 msleep(100);
560 /* stop the tracing. */
561 tracing_stop();
562 /* check the trace buffer */
563 ret = trace_test_buffer(tr, &count);
564 trace->reset(tr);
565 tracing_start();
566
567 return ret;
568}
569#endif /* CONFIG_BRANCH_TRACER */