/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

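/*
 * Entry types the self tests expect to find in the ring buffer;
 * anything else is reported as an invalid entry.
 */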
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
		return 1;
	}
	return 0;
}

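/*
 * Consume every event queued on @cpu and verify that each entry
 * carries a type the self tests recognize.
 */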
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and is constantly filling
	 * the buffer, this will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

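/* Report a tracer whose init callback returned an error during the self test. */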
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
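/*
 * Briefly disable interrupts with the irqsoff tracer active and check
 * that the latency snapshot (max_tr) picked up trace entries.
 */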
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
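/*
 * Briefly disable preemption with the preemptoff tracer active and
 * check that the latency snapshot (max_tr) picked up trace entries.
 */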
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
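/*
 * Exercise the combined preemptirqsoff tracer: disable preemption and
 * interrupts together, re-enable them in the reverse order, verify both
 * trace buffers, and then repeat the sequence a second time.
 */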
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
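/*
 * Kernel thread used as the wakeup target: it makes itself an RT task,
 * signals that it is ready, then sleeps until the test wakes it up.
 */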
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known that we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

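/*
 * Wake the RT thread while the wakeup tracer runs and check that the
 * resulting wakeup latency was captured in the max_tr snapshot.
 */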
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that is already awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
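/*
 * Run the context switch tracer for a short while and make sure it
 * recorded at least one entry.
 */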
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
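/*
 * Let the sysprof tracer run briefly and verify that it produced
 * at least one entry in the trace buffer.
 */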
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
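/*
 * Run the branch tracer briefly and verify that branch events were
 * recorded in the trace buffer.
 */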
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
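/*
 * The hw-branch tracer only gathers the per-cpu branch records when its
 * open() callback runs, so build a minimal iterator, call open(), and
 * then check the trace buffer.
 */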
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */