/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

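/*
 * Note: because this file is #included from trace.c rather than built
 * on its own, it can reference trace.c file-scope state directly
 * (e.g. tracing_disabled and trace_buf_size below).
 */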
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

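/*
 * Consume every event queued on one CPU and check that each entry has a
 * known type.  Returns 0 on success; on failure it disables tracing and
 * returns -1.
 */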
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and is constantly filling the
	 * buffer, that loop will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * such a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

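/*
 * Every selftest below follows the same shape: initialize the tracer
 * under test, generate some traceable activity (sleep, disable irqs,
 * wake a task, ...), stop tracing, and verify the captured entries
 * with trace_test_buffer().
 */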
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
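	/*
	 * __stringify() expands its macro argument before stringifying,
	 * so func_name ends up as "*" followed by the test function's
	 * real name rather than the literal "DYN_FTRACE_TEST_NAME".
	 */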
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

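	/*
	 * ftrace_set_filter(buf, len, reset): a non-zero 'reset' is
	 * assumed here to clear any previously installed filter before
	 * adding buf, so only our test function remains traced.
	 */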
	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}


 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
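/* Without dynamic ftrace, the dynamic test is a no-op that reports success. */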
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

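/*
 * __ftrace_dump() is defined in trace.c; since this file is #included
 * from trace.c, a forward declaration is all we need here.
 */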
static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
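	/* the ~100us irqs-off window above is what the tracer should
	 * now have recorded as the max latency */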

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible while a max irqs-off latency is being
	 * recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
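	/* as above, the preempt-off window should now be the recorded
	 * max latency */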

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible while a max preempt-off latency is being
	 * recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
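	/* note: preemption is re-enabled while irqs are still off, so
	 * the irqs-off region is the one that ends last here */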

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible while a max irqs/preempt-off latency is
	 * being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */


#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the tester know we now have our RT prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

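/*
 * Wakeup-latency selftest: create an RT thread, let it go to sleep,
 * then wake it while tracing; the wakeup tracer should record that
 * thread's wakeup-to-schedule latency in the max buffer.
 */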
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */