/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

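/*
 * Entry types the selftests may legitimately find in the ring
 * buffer; anything else is treated as buffer corruption.
 */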
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

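/*
 * Consume every event queued on one CPU's ring buffer and check that
 * each entry has a valid type, disabling tracing on failure.
 */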
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

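/*
 * Each probe below only increments its counter when called, so the
 * selftest can check that every ftrace_ops fired exactly as often as
 * its filter settings predict.
 */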
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

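/*
 * test_global is registered with the GLOBAL flag and never has a
 * filter of its own set, so the checks below only require its count
 * to be nonzero rather than an exact value.
 */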
static struct ftrace_ops test_global = {
	.func		= trace_selftest_test_global_func,
	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

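/*
 * Register the probes with different filters, call the two test
 * functions, and verify each counter against the hit pattern the
 * filters imply; then repeat with an extra dynamically allocated ops.
 */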
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* func is passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

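/*
 * For reference, a tracer opts into these selftests through the
 * selftest callback of its struct tracer. A sketch (field names
 * assumed from this kernel's struct tracer; not a verbatim copy of
 * any one tracer's definition):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */
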
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers,
	 * making flipping impossible while a parallel max irqs-off
	 * latency is being recorded.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers,
	 * making flipping impossible while a parallel max preempt-off
	 * latency is being recorded.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers,
	 * making flipping impossible while a parallel max
	 * irqs/preempt-off latency is being recorded.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
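/*
 * Helper thread for the wakeup selftest: raise itself to an RT
 * priority, signal readiness, then sleep until the test wakes it.
 */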
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

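/*
 * Verify the wakeup tracer by creating the RT thread above, waking it
 * while tracing, and checking that a max-latency entry was recorded.
 */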
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen, something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */