/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

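/*
 * Consume every event left in one CPU's ring buffer and check that each
 * entry has a type the selftests know about. Bail out (and disable
 * tracing) if an entry looks bogus or if we loop longer than the buffer
 * could possibly hold.
 */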
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we loop
		 * more than that, there's something wrong with the
		 * ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

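/*
 * Probe callbacks used by the dynamic ftrace ops test below: each one
 * simply counts how many times it was invoked, so the test can verify
 * which functions were traced by which ftrace_ops.
 */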
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func		= trace_selftest_test_global_func,
	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

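/*
 * Register several ftrace_ops with different filters (plus a global ops
 * and a dynamically allocated ops), call the two test functions, and
 * verify via the counters that each ops traced exactly the functions it
 * filtered on.
 */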
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	trace_selftest_recursion_cnt++;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of the ftrace features and nothing else is using the
	 * function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

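/*
 * Verify ftrace's recursion protection: a callback registered without
 * RECURSION_SAFE must be guarded by ftrace itself, while one that sets
 * the flag is expected to handle (and here deliberately trigger) its own
 * recursion.
 */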
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

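/*
 * Check that a callback registered with SAVE_REGS receives a pt_regs
 * pointer on architectures that support it, and that registration fails
 * (unless SAVE_REGS_IF_SUPPORTED is used) on architectures that do not.
 */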
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this selftest
 * has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
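/*
 * Selftest for the irqsoff tracer: disable interrupts for a short delay
 * and verify that a max latency trace was recorded.
 */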
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
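/*
 * Kernel thread used by the wakeup tracer selftest: it raises itself to an
 * RT priority, signals the tester, sleeps until woken, then signals again
 * and idles until it is told to stop.
 */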
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

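/*
 * Selftest for the wakeup tracer: wake the RT test thread created above
 * and verify that the resulting wakeup latency was recorded in the
 * max-latency buffer.
 */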
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */