/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

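/*
 * Entry types the selftests below are expected to leave in the ring
 * buffer; anything else is treated as corruption.
 */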
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

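/*
 * Consume every event queued on one CPU and check that each one is an
 * entry type we know about. Returns 0 on success, or -1 (with tracing
 * disabled) if the buffer looks corrupted.
 */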
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

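/*
 * Each probe below simply counts how many times ftrace called it; the
 * selftest registers them with different filters and checks the counts.
 */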
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func			= trace_selftest_test_global_func,
	.flags			= FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

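/*
 * Register several ftrace_ops with different filters, call the two test
 * functions, and verify that each probe fired exactly as often as its
 * filter allows, both while registered and after everything is removed.
 */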
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;	/* dyn_ops is registered here; don't leak it */
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which
 * this selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
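/*
 * Disable interrupts around a short busy delay and check that the
 * irqsoff tracer recorded the latency in the max buffer.
 */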
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed
	 * buffer flip: tracing_stop() disables the tr and max
	 * buffers, which makes flipping impossible if a parallel
	 * max irqs-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
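/*
 * Same idea as the irqsoff test, but the latency window is opened
 * by disabling preemption instead of interrupts.
 */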
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed
	 * buffer flip: tracing_stop() disables the tr and max
	 * buffers, which makes flipping impossible if a parallel
	 * max preempt-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
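/*
 * Combined test: open the latency window with both preemption and
 * interrupts disabled, in both nesting orders, and check the max
 * buffer after each pass.
 */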
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed
	 * buffer flip: tracing_stop() disables the tr and max
	 * buffers, which makes flipping impossible if a parallel
	 * max irqs/preempt-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
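/*
 * Kthread body for the wakeup test below: raise itself to an RT
 * priority, signal the tester, then sleep until it is woken and
 * finally stopped.
 */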
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the tester know we now have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

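/*
 * Create the RT thread above, wake it while it sleeps, and check
 * that the wakeup tracer captured the wakeup latency in the max
 * buffer.
 */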
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
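/*
 * Let the scheduler run for a bit and check that context-switch
 * events made it into the buffer.
 */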
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
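/*
 * Run with the branch tracer enabled for a bit and make sure it
 * recorded something (any traced likely()/unlikely() hit in that
 * window should produce entries).
 */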
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */