| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1 | /* | 
|  | 2 | * ring buffer based function tracer | 
|  | 3 | * | 
|  | 4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | 
|  | 5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | 
|  | 6 | * | 
|  | 7 | * Originally taken from the RT patch by: | 
|  | 8 | *    Arnaldo Carvalho de Melo <acme@redhat.com> | 
|  | 9 | * | 
|  | 10 | * Based on code from the latency_tracer, that is: | 
|  | 11 | *  Copyright (C) 2004-2006 Ingo Molnar | 
| Nadia Yvette Chambers | 6d49e35 | 2012-12-06 10:39:54 +0100 | [diff] [blame] | 12 | *  Copyright (C) 2004 Nadia Yvette Chambers | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 13 | */ | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 14 | #include <linux/ring_buffer.h> | 
| Sam Ravnborg | 273b281 | 2009-10-18 00:52:28 +0200 | [diff] [blame] | 15 | #include <generated/utsrelease.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 16 | #include <linux/stacktrace.h> | 
|  | 17 | #include <linux/writeback.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 18 | #include <linux/kallsyms.h> | 
|  | 19 | #include <linux/seq_file.h> | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 20 | #include <linux/notifier.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 21 | #include <linux/irqflags.h> | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 22 | #include <linux/irq_work.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 23 | #include <linux/debugfs.h> | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 24 | #include <linux/pagemap.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 25 | #include <linux/hardirq.h> | 
|  | 26 | #include <linux/linkage.h> | 
|  | 27 | #include <linux/uaccess.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 28 | #include <linux/kprobes.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 29 | #include <linux/ftrace.h> | 
|  | 30 | #include <linux/module.h> | 
|  | 31 | #include <linux/percpu.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 32 | #include <linux/splice.h> | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 33 | #include <linux/kdebug.h> | 
| Frederic Weisbecker | 5f0c6c0 | 2009-03-27 14:22:10 +0100 | [diff] [blame] | 34 | #include <linux/string.h> | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 35 | #include <linux/rwsem.h> | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 36 | #include <linux/slab.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 37 | #include <linux/ctype.h> | 
|  | 38 | #include <linux/init.h> | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 39 | #include <linux/poll.h> | 
| Steven Rostedt | b892e5c | 2012-03-01 22:06:48 -0500 | [diff] [blame] | 40 | #include <linux/nmi.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 41 | #include <linux/fs.h> | 
| Clark Williams | 8bd75c7 | 2013-02-07 09:47:07 -0600 | [diff] [blame] | 42 | #include <linux/sched/rt.h> | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 43 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 44 | #include "trace.h" | 
| Steven Rostedt | f0868d1 | 2008-12-23 23:24:12 -0500 | [diff] [blame] | 45 | #include "trace_output.h" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 46 |  | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 47 | /* | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 48 | * On boot up, the ring buffer is set to the minimum size, so that | 
|  | 49 | * we do not waste memory on systems that are not using tracing. | 
|  | 50 | */ | 
| Li Zefan | 020e5f8 | 2009-07-01 10:47:05 +0800 | [diff] [blame] | 51 | int ring_buffer_expanded; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 52 |  | 
|  | 53 | /* | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 54 | * We need to change this state when a selftest is running. | 
| Frederic Weisbecker | ff32504 | 2008-12-04 23:47:35 +0100 | [diff] [blame] | 55 | * A selftest will look into the ring buffer to count the | 
|  | 56 | * entries inserted during the selftest, although some concurrent | 
| Ingo Molnar | 5e1607a | 2009-03-05 10:24:48 +0100 | [diff] [blame] | 57 | * insertions into the ring buffer, such as trace_printk, could occur | 
| Frederic Weisbecker | ff32504 | 2008-12-04 23:47:35 +0100 | [diff] [blame] | 58 | * at the same time, giving false positive or negative results. | 
|  | 59 | */ | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 60 | static bool __read_mostly tracing_selftest_running; | 
| Frederic Weisbecker | ff32504 | 2008-12-04 23:47:35 +0100 | [diff] [blame] | 61 |  | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 62 | /* | 
|  | 63 | * If a tracer is running, we do not want to run SELFTEST. | 
|  | 64 | */ | 
| Li Zefan | 020e5f8 | 2009-07-01 10:47:05 +0800 | [diff] [blame] | 65 | bool __read_mostly tracing_selftest_disabled; | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 66 |  | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 67 | /* For tracers that don't implement custom flags */ | 
|  | 68 | static struct tracer_opt dummy_tracer_opt[] = { | 
|  | 69 | { } | 
|  | 70 | }; | 
|  | 71 |  | 
|  | 72 | static struct tracer_flags dummy_tracer_flags = { | 
|  | 73 | .val = 0, | 
|  | 74 | .opts = dummy_tracer_opt | 
|  | 75 | }; | 
|  | 76 |  | 
|  | 77 | static int dummy_set_flag(u32 old_flags, u32 bit, int set) | 
|  | 78 | { | 
|  | 79 | return 0; | 
|  | 80 | } | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 81 |  | 
|  | 82 | /* | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 83 | * To prevent the comm cache from being overwritten when no | 
|  | 84 | * tracing is active, only save the comm when a trace event | 
|  | 85 | * occurs. | 
|  | 86 | */ | 
|  | 87 | static DEFINE_PER_CPU(bool, trace_cmdline_save); | 
|  | 88 |  | 
|  | 89 | /* | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 90 | * When a reader is waiting for data, this variable is | 
|  | 91 | * set to true. | 
|  | 92 | */ | 
|  | 93 | static bool trace_wakeup_needed; | 
|  | 94 |  | 
|  | 95 | static struct irq_work trace_work_wakeup; | 
|  | 96 |  | 
|  | 97 | /* | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 98 | * Kill all tracing for good (never come back). | 
|  | 99 | * It is initialized to 1 but will turn to zero if the initialization | 
|  | 100 | * of the tracer is successful. But that is the only place that sets | 
|  | 101 | * this back to zero. | 
|  | 102 | */ | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 103 | static int tracing_disabled = 1; | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 104 |  | 
| Christoph Lameter | 9288f99 | 2009-10-07 19:17:45 -0400 | [diff] [blame] | 105 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 106 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 107 | cpumask_var_t __read_mostly	tracing_buffer_mask; | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 108 |  | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 109 | /* | 
|  | 110 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | 
|  | 111 | * | 
|  | 112 | * If there is an oops (or kernel panic) and the ftrace_dump_on_oops | 
|  | 113 | * is set, then ftrace_dump is called. This will output the contents | 
|  | 114 | * of the ftrace buffers to the console.  This is very useful for | 
|  | 115 | * capturing traces that lead to crashes and outputting them to a | 
|  | 116 | * serial console. | 
|  | 117 | * | 
|  | 118 | * It is off by default, but you can enable it either by specifying | 
|  | 119 | * "ftrace_dump_on_oops" on the kernel command line, or by setting | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 120 | * /proc/sys/kernel/ftrace_dump_on_oops | 
|  | 121 | * Set it to 1 to dump the buffers of all CPUs. | 
|  | 122 | * Set it to 2 to dump the buffer of the CPU that triggered the oops. | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 123 | */ | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 124 |  | 
|  | 125 | enum ftrace_dump_mode ftrace_dump_on_oops; | 
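/*
 * Illustrative usage only (not part of the original file): the knob
 * described above can be flipped at run time, e.g.
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * or at boot time by passing "ftrace_dump_on_oops" (dump all CPUs) or
 * "ftrace_dump_on_oops=orig_cpu" on the kernel command line; see
 * set_ftrace_dump_on_oops() below.
 */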
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 126 |  | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 127 | static int tracing_set_tracer(const char *buf); | 
|  | 128 |  | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 129 | #define MAX_TRACER_SIZE		100 | 
|  | 130 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 131 | static char *default_bootup_tracer; | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 132 |  | 
| Frederic Weisbecker | 1beee96 | 2009-10-14 20:50:32 +0200 | [diff] [blame] | 133 | static int __init set_cmdline_ftrace(char *str) | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 134 | { | 
| Chen Gang | 67012ab | 2013-04-08 12:06:44 +0800 | [diff] [blame] | 135 | strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 136 | default_bootup_tracer = bootup_tracer_buf; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 137 | /* We are using ftrace early, expand it */ | 
|  | 138 | ring_buffer_expanded = 1; | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 139 | return 1; | 
|  | 140 | } | 
| Frederic Weisbecker | 1beee96 | 2009-10-14 20:50:32 +0200 | [diff] [blame] | 141 | __setup("ftrace=", set_cmdline_ftrace); | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 142 |  | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 143 | static int __init set_ftrace_dump_on_oops(char *str) | 
|  | 144 | { | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 145 | if (*str++ != '=' || !*str) { | 
|  | 146 | ftrace_dump_on_oops = DUMP_ALL; | 
|  | 147 | return 1; | 
|  | 148 | } | 
|  | 149 |  | 
|  | 150 | if (!strcmp("orig_cpu", str)) { | 
|  | 151 | ftrace_dump_on_oops = DUMP_ORIG; | 
|  | 152 | return 1; | 
|  | 153 | } | 
|  | 154 |  | 
|  | 155 | return 0; | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 156 | } | 
|  | 157 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 158 |  | 
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 159 |  | 
|  | 160 | static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; | 
|  | 161 | static char *trace_boot_options __initdata; | 
|  | 162 |  | 
|  | 163 | static int __init set_trace_boot_options(char *str) | 
|  | 164 | { | 
| Chen Gang | 67012ab | 2013-04-08 12:06:44 +0800 | [diff] [blame] | 165 | strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); | 
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 166 | trace_boot_options = trace_boot_options_buf; | 
|  | 167 | return 0; | 
|  | 168 | } | 
|  | 169 | __setup("trace_options=", set_trace_boot_options); | 
|  | 170 |  | 
| Lai Jiangshan | cf8e347 | 2009-03-30 13:48:00 +0800 | [diff] [blame] | 171 | unsigned long long ns2usecs(cycle_t nsec) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 172 | { | 
|  | 173 | nsec += 500; | 
|  | 174 | do_div(nsec, 1000); | 
|  | 175 | return nsec; | 
|  | 176 | } | 
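/*
 * For example, ns2usecs(1499) returns 1 and ns2usecs(1500) returns 2;
 * adding 500 before the divide rounds to the nearest microsecond.
 */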
|  | 177 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 178 | /* | 
|  | 179 | * The global_trace is the descriptor that holds the tracing | 
|  | 180 | * buffers for the live tracing. For each CPU, it contains | 
|  | 181 | * a linked list of pages that will store trace entries. The | 
|  | 182 | * page descriptor of the pages in memory is used to hold | 
|  | 183 | * the linked list by linking the lru item in the page descriptor | 
|  | 184 | * to each of the pages in the buffer per CPU. | 
|  | 185 | * | 
|  | 186 | * For each active CPU there is a data field that holds the | 
|  | 187 | * pages for the buffer for that CPU. Each CPU has the same number | 
|  | 188 | * of pages allocated for its buffer. | 
|  | 189 | */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 190 | static struct trace_array	global_trace; | 
|  | 191 |  | 
|  | 192 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); | 
|  | 193 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 194 | int filter_current_check_discard(struct ring_buffer *buffer, | 
|  | 195 | struct ftrace_event_call *call, void *rec, | 
| Tom Zanussi | eb02ce0 | 2009-04-08 03:15:54 -0500 | [diff] [blame] | 196 | struct ring_buffer_event *event) | 
|  | 197 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 198 | return filter_check_discard(call, rec, buffer, event); | 
| Tom Zanussi | eb02ce0 | 2009-04-08 03:15:54 -0500 | [diff] [blame] | 199 | } | 
| Steven Rostedt | 17c873e | 2009-04-10 18:12:50 -0400 | [diff] [blame] | 200 | EXPORT_SYMBOL_GPL(filter_current_check_discard); | 
| Tom Zanussi | eb02ce0 | 2009-04-08 03:15:54 -0500 | [diff] [blame] | 201 |  | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 202 | cycle_t ftrace_now(int cpu) | 
|  | 203 | { | 
|  | 204 | u64 ts; | 
|  | 205 |  | 
|  | 206 | /* Early boot up does not have a buffer yet */ | 
|  | 207 | if (!global_trace.buffer) | 
|  | 208 | return trace_clock_local(); | 
|  | 209 |  | 
|  | 210 | ts = ring_buffer_time_stamp(global_trace.buffer, cpu); | 
|  | 211 | ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts); | 
|  | 212 |  | 
|  | 213 | return ts; | 
|  | 214 | } | 
|  | 215 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 216 | /* | 
|  | 217 | * The max_tr is used to snapshot the global_trace when a maximum | 
|  | 218 | * latency is reached. Some tracers will use this to store a maximum | 
|  | 219 | * trace while it continues examining live traces. | 
|  | 220 | * | 
|  | 221 | * The buffers for the max_tr are set up the same as the global_trace. | 
|  | 222 | * When a snapshot is taken, the linked list of the max_tr is swapped | 
|  | 223 | * with the linked list of the global_trace and the buffers are reset for | 
|  | 224 | * the global_trace so the tracing can continue. | 
|  | 225 | */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 226 | static struct trace_array	max_tr; | 
|  | 227 |  | 
| Tejun Heo | 9705f69 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 228 | static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 229 |  | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 230 | int tracing_is_enabled(void) | 
|  | 231 | { | 
| Steven Rostedt | 0fb9656 | 2012-05-11 14:25:30 -0400 | [diff] [blame] | 232 | return tracing_is_on(); | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 233 | } | 
|  | 234 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 235 | /* | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 236 | * trace_buf_size is the size in bytes that is allocated | 
|  | 237 | * for a buffer. Note, the number of bytes is always rounded | 
|  | 238 | * to page size. | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 239 | * | 
|  | 240 | * This number is purposely set to a low number of 16384. | 
|  | 241 | * If the dump on oops happens, it will be much appreciated | 
|  | 242 | * to not have to wait for all that output. In any case, this is | 
|  | 243 | * configurable at both boot time and run time. | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 244 | */ | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 245 | #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */ | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 246 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 247 | static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 248 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 249 | /* trace_types holds a linked list of available tracers. */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 250 | static struct tracer		*trace_types __read_mostly; | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 251 |  | 
|  | 252 | /* current_trace points to the tracer that is currently active */ | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 253 | static struct tracer		*current_trace __read_mostly = &nop_trace; | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 254 |  | 
|  | 255 | /* | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 256 | * trace_types_lock is used to protect the trace_types list. | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 257 | */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 258 | static DEFINE_MUTEX(trace_types_lock); | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 259 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 260 | /* | 
|  | 261 | * Serialize access to the ring buffer. | 
|  | 262 | * | 
|  | 263 | * The ring buffer serializes readers, but that is only low-level protection. | 
|  | 264 | * The validity of the events (which are returned by ring_buffer_peek(), etc.) | 
|  | 265 | * is not protected by the ring buffer. | 
|  | 266 | * | 
|  | 267 | * The content of events may become garbage if we allow another process to | 
|  | 268 | * consume these events concurrently: | 
|  | 269 | *   A) the page of the consumed events may become a normal page | 
|  | 270 | *      (not a reader page) in the ring buffer, and this page will be rewritten | 
|  | 271 | *      by the event producer. | 
|  | 272 | *   B) the page of the consumed events may become a page for splice_read, | 
|  | 273 | *      and this page will be returned to the system. | 
|  | 274 | * | 
|  | 275 | * These primitives allow multiple processes to access different per-cpu ring | 
|  | 276 | * buffers concurrently. | 
|  | 277 | * | 
|  | 278 | * These primitives don't distinguish between read-only and read-consume access. | 
|  | 279 | * Multiple read-only accesses are also serialized. | 
|  | 280 | */ | 
|  | 281 |  | 
|  | 282 | #ifdef CONFIG_SMP | 
|  | 283 | static DECLARE_RWSEM(all_cpu_access_lock); | 
|  | 284 | static DEFINE_PER_CPU(struct mutex, cpu_access_lock); | 
|  | 285 |  | 
|  | 286 | static inline void trace_access_lock(int cpu) | 
|  | 287 | { | 
|  | 288 | if (cpu == TRACE_PIPE_ALL_CPU) { | 
|  | 289 | /* gain it for accessing the whole ring buffer. */ | 
|  | 290 | down_write(&all_cpu_access_lock); | 
|  | 291 | } else { | 
|  | 292 | /* gain it for accessing a cpu ring buffer. */ | 
|  | 293 |  | 
|  | 294 | /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */ | 
|  | 295 | down_read(&all_cpu_access_lock); | 
|  | 296 |  | 
|  | 297 | /* Secondly block other access to this @cpu ring buffer. */ | 
|  | 298 | mutex_lock(&per_cpu(cpu_access_lock, cpu)); | 
|  | 299 | } | 
|  | 300 | } | 
|  | 301 |  | 
|  | 302 | static inline void trace_access_unlock(int cpu) | 
|  | 303 | { | 
|  | 304 | if (cpu == TRACE_PIPE_ALL_CPU) { | 
|  | 305 | up_write(&all_cpu_access_lock); | 
|  | 306 | } else { | 
|  | 307 | mutex_unlock(&per_cpu(cpu_access_lock, cpu)); | 
|  | 308 | up_read(&all_cpu_access_lock); | 
|  | 309 | } | 
|  | 310 | } | 
|  | 311 |  | 
|  | 312 | static inline void trace_access_lock_init(void) | 
|  | 313 | { | 
|  | 314 | int cpu; | 
|  | 315 |  | 
|  | 316 | for_each_possible_cpu(cpu) | 
|  | 317 | mutex_init(&per_cpu(cpu_access_lock, cpu)); | 
|  | 318 | } | 
|  | 319 |  | 
|  | 320 | #else | 
|  | 321 |  | 
|  | 322 | static DEFINE_MUTEX(access_lock); | 
|  | 323 |  | 
|  | 324 | static inline void trace_access_lock(int cpu) | 
|  | 325 | { | 
|  | 326 | (void)cpu; | 
|  | 327 | mutex_lock(&access_lock); | 
|  | 328 | } | 
|  | 329 |  | 
|  | 330 | static inline void trace_access_unlock(int cpu) | 
|  | 331 | { | 
|  | 332 | (void)cpu; | 
|  | 333 | mutex_unlock(&access_lock); | 
|  | 334 | } | 
|  | 335 |  | 
|  | 336 | static inline void trace_access_lock_init(void) | 
|  | 337 | { | 
|  | 338 | } | 
|  | 339 |  | 
|  | 340 | #endif | 
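/*
 * A minimal usage sketch of the primitives above (illustrative only,
 * not part of the original file): a reader of a single per-cpu buffer
 * takes the read side plus that cpu's mutex, while a reader of all
 * buffers passes TRACE_PIPE_ALL_CPU and takes the write side of
 * all_cpu_access_lock:
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events from that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 */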
|  | 341 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 342 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 
| Ingo Molnar | 4e65551 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 343 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 
|  | 344 |  | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 345 | /* trace_flags holds trace_options default values */ | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 346 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 
| Steven Rostedt | a2a16d6 | 2009-03-24 23:17:58 -0400 | [diff] [blame] | 347 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 348 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | | 
| Mandeep Singh Baines | 5224c3a | 2012-09-07 18:12:19 -0700 | [diff] [blame] | 349 | TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS; | 
| Ingo Molnar | 4e65551 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 350 |  | 
| Steven Rostedt | b8de7bd | 2009-08-31 22:32:27 -0400 | [diff] [blame] | 351 | static int trace_stop_count; | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 352 | static DEFINE_RAW_SPINLOCK(tracing_start_lock); | 
| Steven Rostedt | b8de7bd | 2009-08-31 22:32:27 -0400 | [diff] [blame] | 353 |  | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 354 | /** | 
|  | 355 | * trace_wake_up - wake up tasks waiting for trace input | 
|  | 356 | * | 
|  | 357 | * This is the irq_work handler that wakes up any task blocked on the | 
|  | 358 | * trace_wait queue. It is used with trace_poll for tasks polling the | 
|  | 359 | * trace. | 
|  | 360 | */ | 
|  | 361 | static void trace_wake_up(struct irq_work *work) | 
| Vaibhav Nagarnaik | e7e2ee8 | 2011-05-10 13:27:21 -0700 | [diff] [blame] | 362 | { | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 363 | wake_up_all(&trace_wait); | 
| Vaibhav Nagarnaik | e7e2ee8 | 2011-05-10 13:27:21 -0700 | [diff] [blame] | 364 |  | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 365 | } | 
| Vaibhav Nagarnaik | e7e2ee8 | 2011-05-10 13:27:21 -0700 | [diff] [blame] | 366 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 367 | /** | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 368 | * tracing_on - enable tracing buffers | 
|  | 369 | * | 
|  | 370 | * This function enables tracing buffers that may have been | 
|  | 371 | * disabled with tracing_off. | 
|  | 372 | */ | 
|  | 373 | void tracing_on(void) | 
|  | 374 | { | 
|  | 375 | if (global_trace.buffer) | 
|  | 376 | ring_buffer_record_on(global_trace.buffer); | 
|  | 377 | /* | 
|  | 378 | * This flag is only looked at when buffers haven't been | 
|  | 379 | * allocated yet. We don't really care about the race | 
|  | 380 | * between setting this flag and actually turning | 
|  | 381 | * on the buffer. | 
|  | 382 | */ | 
|  | 383 | global_trace.buffer_disabled = 0; | 
|  | 384 | } | 
|  | 385 | EXPORT_SYMBOL_GPL(tracing_on); | 
|  | 386 |  | 
|  | 387 | /** | 
|  | 388 | * tracing_off - turn off tracing buffers | 
|  | 389 | * | 
|  | 390 | * This function stops the tracing buffers from recording data. | 
|  | 391 | * It does not disable any overhead the tracers themselves may | 
|  | 392 | * be causing. This function simply causes all recording to | 
|  | 393 | * the ring buffers to fail. | 
|  | 394 | */ | 
|  | 395 | void tracing_off(void) | 
|  | 396 | { | 
|  | 397 | if (global_trace.buffer) | 
| Steven Rostedt | f2bf1f6 | 2012-06-06 19:50:40 -0400 | [diff] [blame] | 398 | ring_buffer_record_off(global_trace.buffer); | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 399 | /* | 
|  | 400 | * This flag is only looked at when buffers haven't been | 
|  | 401 | * allocated yet. We don't really care about the race | 
|  | 402 | * between setting this flag and actually turning | 
|  | 403 | * off the buffer. | 
|  | 404 | */ | 
|  | 405 | global_trace.buffer_disabled = 1; | 
|  | 406 | } | 
|  | 407 | EXPORT_SYMBOL_GPL(tracing_off); | 
|  | 408 |  | 
|  | 409 | /** | 
|  | 410 | * tracing_is_on - show state of ring buffers enabled | 
|  | 411 | */ | 
|  | 412 | int tracing_is_on(void) | 
|  | 413 | { | 
|  | 414 | if (global_trace.buffer) | 
|  | 415 | return ring_buffer_record_is_on(global_trace.buffer); | 
|  | 416 | return !global_trace.buffer_disabled; | 
|  | 417 | } | 
|  | 418 | EXPORT_SYMBOL_GPL(tracing_is_on); | 
|  | 419 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 420 | static int __init set_buf_size(char *str) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 421 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 422 | unsigned long buf_size; | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 423 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 424 | if (!str) | 
|  | 425 | return 0; | 
| Li Zefan | 9d612be | 2009-06-24 17:33:15 +0800 | [diff] [blame] | 426 | buf_size = memparse(str, &str); | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 427 | /* nr_entries can not be zero */ | 
| Li Zefan | 9d612be | 2009-06-24 17:33:15 +0800 | [diff] [blame] | 428 | if (buf_size == 0) | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 429 | return 0; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 430 | trace_buf_size = buf_size; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 431 | return 1; | 
|  | 432 | } | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 433 | __setup("trace_buf_size=", set_buf_size); | 
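/*
 * Illustrative only: because set_buf_size() uses memparse(), the boot
 * parameter accepts size suffixes, e.g. "trace_buf_size=1M" requests a
 * one-megabyte buffer (rounded to page size, as noted above).
 */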
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 434 |  | 
| Tim Bird | 0e95017 | 2010-02-25 15:36:43 -0800 | [diff] [blame] | 435 | static int __init set_tracing_thresh(char *str) | 
|  | 436 | { | 
| Wang Tianhong | 87abb3b | 2012-08-02 14:02:00 +0800 | [diff] [blame] | 437 | unsigned long threshold; | 
| Tim Bird | 0e95017 | 2010-02-25 15:36:43 -0800 | [diff] [blame] | 438 | int ret; | 
|  | 439 |  | 
|  | 440 | if (!str) | 
|  | 441 | return 0; | 
| Daniel Walter | bcd83ea | 2012-09-26 22:08:38 +0200 | [diff] [blame] | 442 | ret = kstrtoul(str, 0, &threshold); | 
| Tim Bird | 0e95017 | 2010-02-25 15:36:43 -0800 | [diff] [blame] | 443 | if (ret < 0) | 
|  | 444 | return 0; | 
| Wang Tianhong | 87abb3b | 2012-08-02 14:02:00 +0800 | [diff] [blame] | 445 | tracing_thresh = threshold * 1000; | 
| Tim Bird | 0e95017 | 2010-02-25 15:36:43 -0800 | [diff] [blame] | 446 | return 1; | 
|  | 447 | } | 
|  | 448 | __setup("tracing_thresh=", set_tracing_thresh); | 
|  | 449 |  | 
| Steven Rostedt | 57f50be | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 450 | unsigned long nsecs_to_usecs(unsigned long nsecs) | 
|  | 451 | { | 
|  | 452 | return nsecs / 1000; | 
|  | 453 | } | 
|  | 454 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 455 | /* These must match the bit positions in trace_iterator_flags */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 456 | static const char *trace_options[] = { | 
|  | 457 | "print-parent", | 
|  | 458 | "sym-offset", | 
|  | 459 | "sym-addr", | 
|  | 460 | "verbose", | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 461 | "raw", | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 462 | "hex", | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 463 | "bin", | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 464 | "block", | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 465 | "stacktrace", | 
| Ingo Molnar | 5e1607a | 2009-03-05 10:24:48 +0100 | [diff] [blame] | 466 | "trace_printk", | 
| Steven Rostedt | b2a866f | 2008-11-03 23:15:57 -0500 | [diff] [blame] | 467 | "ftrace_preempt", | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 468 | "branch", | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 469 | "annotate", | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 470 | "userstacktrace", | 
| Török Edwin | b54d3de | 2008-11-22 13:28:48 +0200 | [diff] [blame] | 471 | "sym-userobj", | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 472 | "printk-msg-only", | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 473 | "context-info", | 
| Steven Rostedt | c032ef64 | 2009-03-04 20:34:24 -0500 | [diff] [blame] | 474 | "latency-format", | 
| Steven Rostedt | be6f164 | 2009-03-24 11:06:24 -0400 | [diff] [blame] | 475 | "sleep-time", | 
| Steven Rostedt | a2a16d6 | 2009-03-24 23:17:58 -0400 | [diff] [blame] | 476 | "graph-time", | 
| Li Zefan | e870e9a | 2010-07-02 11:07:32 +0800 | [diff] [blame] | 477 | "record-cmd", | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 478 | "overwrite", | 
| Steven Rostedt | cf30cf6 | 2011-06-14 22:44:07 -0400 | [diff] [blame] | 479 | "disable_on_free", | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 480 | "irq-info", | 
| Mandeep Singh Baines | 5224c3a | 2012-09-07 18:12:19 -0700 | [diff] [blame] | 481 | "markers", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 482 | NULL | 
|  | 483 | }; | 
|  | 484 |  | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 485 | static struct { | 
|  | 486 | u64 (*func)(void); | 
|  | 487 | const char *name; | 
| David Sharp | 8be0709 | 2012-11-13 12:18:22 -0800 | [diff] [blame] | 488 | int in_ns;		/* is this clock in nanoseconds? */ | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 489 | } trace_clocks[] = { | 
| David Sharp | 8be0709 | 2012-11-13 12:18:22 -0800 | [diff] [blame] | 490 | { trace_clock_local,	"local",	1 }, | 
|  | 491 | { trace_clock_global,	"global",	1 }, | 
|  | 492 | { trace_clock_counter,	"counter",	0 }, | 
| David Sharp | 8cbd9cc | 2012-11-13 12:18:21 -0800 | [diff] [blame] | 493 | ARCH_TRACE_CLOCKS | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 494 | }; | 
|  | 495 |  | 
|  | 496 | int trace_clock_id; | 
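/*
 * Illustrative only: the names in the trace_clocks[] table above are
 * what user space selects through the debugfs "trace_clock" file, e.g.
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * switches event timestamps to the "global" clock.
 */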
|  | 497 |  | 
| jolsa@redhat.com | b63f39e | 2009-09-11 17:29:27 +0200 | [diff] [blame] | 498 | /* | 
|  | 499 | * trace_parser_get_init - allocates the buffer for the trace parser | 
|  | 500 | */ | 
|  | 501 | int trace_parser_get_init(struct trace_parser *parser, int size) | 
|  | 502 | { | 
|  | 503 | memset(parser, 0, sizeof(*parser)); | 
|  | 504 |  | 
|  | 505 | parser->buffer = kmalloc(size, GFP_KERNEL); | 
|  | 506 | if (!parser->buffer) | 
|  | 507 | return 1; | 
|  | 508 |  | 
|  | 509 | parser->size = size; | 
|  | 510 | return 0; | 
|  | 511 | } | 
|  | 512 |  | 
|  | 513 | /* | 
|  | 514 | * trace_parser_put - frees the buffer for trace parser | 
|  | 515 | */ | 
|  | 516 | void trace_parser_put(struct trace_parser *parser) | 
|  | 517 | { | 
|  | 518 | kfree(parser->buffer); | 
|  | 519 | } | 
|  | 520 |  | 
|  | 521 | /* | 
|  | 522 | * trace_get_user - reads the user input string separated by space | 
|  | 523 | * (matched by isspace(ch)) | 
|  | 524 | * | 
|  | 525 | * For each string found the 'struct trace_parser' is updated, | 
|  | 526 | * and the function returns. | 
|  | 527 | * | 
|  | 528 | * Returns number of bytes read. | 
|  | 529 | * | 
|  | 530 | * See kernel/trace/trace.h for 'struct trace_parser' details. | 
|  | 531 | */ | 
|  | 532 | int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | 
|  | 533 | size_t cnt, loff_t *ppos) | 
|  | 534 | { | 
|  | 535 | char ch; | 
|  | 536 | size_t read = 0; | 
|  | 537 | ssize_t ret; | 
|  | 538 |  | 
|  | 539 | if (!*ppos) | 
|  | 540 | trace_parser_clear(parser); | 
|  | 541 |  | 
|  | 542 | ret = get_user(ch, ubuf++); | 
|  | 543 | if (ret) | 
|  | 544 | goto out; | 
|  | 545 |  | 
|  | 546 | read++; | 
|  | 547 | cnt--; | 
|  | 548 |  | 
|  | 549 | /* | 
|  | 550 | * The parser is not finished with the last write, | 
|  | 551 | * continue reading the user input without skipping spaces. | 
|  | 552 | */ | 
|  | 553 | if (!parser->cont) { | 
|  | 554 | /* skip white space */ | 
|  | 555 | while (cnt && isspace(ch)) { | 
|  | 556 | ret = get_user(ch, ubuf++); | 
|  | 557 | if (ret) | 
|  | 558 | goto out; | 
|  | 559 | read++; | 
|  | 560 | cnt--; | 
|  | 561 | } | 
|  | 562 |  | 
|  | 563 | /* only spaces were written */ | 
|  | 564 | if (isspace(ch)) { | 
|  | 565 | *ppos += read; | 
|  | 566 | ret = read; | 
|  | 567 | goto out; | 
|  | 568 | } | 
|  | 569 |  | 
|  | 570 | parser->idx = 0; | 
|  | 571 | } | 
|  | 572 |  | 
|  | 573 | /* read the non-space input */ | 
|  | 574 | while (cnt && !isspace(ch)) { | 
| Li Zefan | 3c235a3 | 2009-09-22 13:51:54 +0800 | [diff] [blame] | 575 | if (parser->idx < parser->size - 1) | 
| jolsa@redhat.com | b63f39e | 2009-09-11 17:29:27 +0200 | [diff] [blame] | 576 | parser->buffer[parser->idx++] = ch; | 
|  | 577 | else { | 
|  | 578 | ret = -EINVAL; | 
|  | 579 | goto out; | 
|  | 580 | } | 
|  | 581 | ret = get_user(ch, ubuf++); | 
|  | 582 | if (ret) | 
|  | 583 | goto out; | 
|  | 584 | read++; | 
|  | 585 | cnt--; | 
|  | 586 | } | 
|  | 587 |  | 
|  | 588 | /* We either got finished input or we have to wait for another call. */ | 
|  | 589 | if (isspace(ch)) { | 
|  | 590 | parser->buffer[parser->idx] = 0; | 
|  | 591 | parser->cont = false; | 
|  | 592 | } else { | 
|  | 593 | parser->cont = true; | 
|  | 594 | parser->buffer[parser->idx++] = ch; | 
|  | 595 | } | 
|  | 596 |  | 
|  | 597 | *ppos += read; | 
|  | 598 | ret = read; | 
|  | 599 |  | 
|  | 600 | out: | 
|  | 601 | return ret; | 
|  | 602 | } | 
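/*
 * Illustrative only: if user space writes "foo bar\n" to a file parsed
 * with trace_get_user(), the first call returns the token "foo" (the
 * trailing space terminates it, so parser->cont is false) and a second
 * call, with *ppos advanced, returns "bar".  A token cut off at the end
 * of a write is kept with parser->cont set to true and is completed by
 * the following write.
 */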
|  | 603 |  | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 604 | ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) | 
|  | 605 | { | 
|  | 606 | int len; | 
|  | 607 | int ret; | 
|  | 608 |  | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 609 | if (!cnt) | 
|  | 610 | return 0; | 
|  | 611 |  | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 612 | if (s->len <= s->readpos) | 
|  | 613 | return -EBUSY; | 
|  | 614 |  | 
|  | 615 | len = s->len - s->readpos; | 
|  | 616 | if (cnt > len) | 
|  | 617 | cnt = len; | 
|  | 618 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 619 | if (ret == cnt) | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 620 | return -EFAULT; | 
|  | 621 |  | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 622 | cnt -= ret; | 
|  | 623 |  | 
| Steven Rostedt | e74da52 | 2009-03-04 20:31:11 -0500 | [diff] [blame] | 624 | s->readpos += cnt; | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 625 | return cnt; | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 626 | } | 
|  | 627 |  | 
| Dmitri Vorobiev | b8b9426 | 2009-03-22 19:11:11 +0200 | [diff] [blame] | 628 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 629 | { | 
|  | 630 | int len; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 631 |  | 
|  | 632 | if (s->len <= s->readpos) | 
|  | 633 | return -EBUSY; | 
|  | 634 |  | 
|  | 635 | len = s->len - s->readpos; | 
|  | 636 | if (cnt > len) | 
|  | 637 | cnt = len; | 
| Dan Carpenter | 5a26c8f | 2012-04-20 09:31:45 +0300 | [diff] [blame] | 638 | memcpy(buf, s->buffer + s->readpos, cnt); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 639 |  | 
| Steven Rostedt | e74da52 | 2009-03-04 20:31:11 -0500 | [diff] [blame] | 640 | s->readpos += cnt; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 641 | return cnt; | 
|  | 642 | } | 
|  | 643 |  | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 644 | /* | 
|  | 645 | * ftrace_max_lock is used to protect the swapping of buffers | 
|  | 646 | * when taking a max snapshot. The buffers themselves are | 
|  | 647 | * protected by per_cpu spinlocks. But the action of the swap | 
|  | 648 | * needs its own lock. | 
|  | 649 | * | 
| Thomas Gleixner | 445c895 | 2009-12-02 19:49:50 +0100 | [diff] [blame] | 650 | * This is defined as an arch_spinlock_t in order to help | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 651 | * with performance when lockdep debugging is enabled. | 
|  | 652 | * | 
|  | 653 | * It is also used in other places outside the update_max_tr | 
|  | 654 | * so it needs to be defined outside of the | 
|  | 655 | * CONFIG_TRACER_MAX_TRACE. | 
|  | 656 | */ | 
| Thomas Gleixner | 445c895 | 2009-12-02 19:49:50 +0100 | [diff] [blame] | 657 | static arch_spinlock_t ftrace_max_lock = | 
| Thomas Gleixner | edc35bd | 2009-12-03 12:38:57 +0100 | [diff] [blame] | 658 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 659 |  | 
| Tim Bird | 0e95017 | 2010-02-25 15:36:43 -0800 | [diff] [blame] | 660 | unsigned long __read_mostly	tracing_thresh; | 
|  | 661 |  | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 662 | #ifdef CONFIG_TRACER_MAX_TRACE | 
|  | 663 | unsigned long __read_mostly	tracing_max_latency; | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 664 |  | 
|  | 665 | /* | 
|  | 666 | * Copy the new maximum trace into the separate maximum-trace | 
|  | 667 | * structure. (this way the maximum trace is permanently saved, | 
|  | 668 | * for later retrieval via /sys/kernel/debug/tracing/latency_trace) | 
|  | 669 | */ | 
|  | 670 | static void | 
|  | 671 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 
|  | 672 | { | 
|  | 673 | struct trace_array_cpu *data = tr->data[cpu]; | 
| Arnaldo Carvalho de Melo | 1acaa1b | 2010-03-05 18:23:50 -0300 | [diff] [blame] | 674 | struct trace_array_cpu *max_data; | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 675 |  | 
|  | 676 | max_tr.cpu = cpu; | 
|  | 677 | max_tr.time_start = data->preempt_timestamp; | 
|  | 678 |  | 
| Steven Rostedt | 8248ac0 | 2009-09-02 12:27:41 -0400 | [diff] [blame] | 679 | max_data = max_tr.data[cpu]; | 
|  | 680 | max_data->saved_latency = tracing_max_latency; | 
|  | 681 | max_data->critical_start = data->critical_start; | 
|  | 682 | max_data->critical_end = data->critical_end; | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 683 |  | 
| Arnaldo Carvalho de Melo | 1acaa1b | 2010-03-05 18:23:50 -0300 | [diff] [blame] | 684 | memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); | 
| Steven Rostedt | 8248ac0 | 2009-09-02 12:27:41 -0400 | [diff] [blame] | 685 | max_data->pid = tsk->pid; | 
|  | 686 | max_data->uid = task_uid(tsk); | 
|  | 687 | max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; | 
|  | 688 | max_data->policy = tsk->policy; | 
|  | 689 | max_data->rt_priority = tsk->rt_priority; | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 690 |  | 
|  | 691 | /* record this task's comm */ | 
|  | 692 | tracing_record_cmdline(tsk); | 
|  | 693 | } | 
|  | 694 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 695 | /** | 
|  | 696 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr | 
|  | 697 | * @tr: tracer | 
|  | 698 | * @tsk: the task with the latency | 
|  | 699 | * @cpu: The cpu that initiated the trace. | 
|  | 700 | * | 
|  | 701 | * Flip the buffers between the @tr and the max_tr and record information | 
|  | 702 | * about which task was the cause of this latency. | 
|  | 703 | */ | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 704 | void | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 705 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 
|  | 706 | { | 
| Steven Rostedt (Red Hat) | 2721e72 | 2013-03-12 11:32:32 -0400 | [diff] [blame] | 707 | struct ring_buffer *buf; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 708 |  | 
| Steven Rostedt | b8de7bd | 2009-08-31 22:32:27 -0400 | [diff] [blame] | 709 | if (trace_stop_count) | 
|  | 710 | return; | 
|  | 711 |  | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 712 | WARN_ON_ONCE(!irqs_disabled()); | 
| Steven Rostedt | 34600f0 | 2013-01-22 13:35:11 -0500 | [diff] [blame] | 713 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 714 | if (!current_trace->allocated_snapshot) { | 
|  | 715 | /* Only the nop tracer should hit this when disabling */ | 
|  | 716 | WARN_ON_ONCE(current_trace != &nop_trace); | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 717 | return; | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 718 | } | 
| Steven Rostedt | 34600f0 | 2013-01-22 13:35:11 -0500 | [diff] [blame] | 719 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 720 | arch_spin_lock(&ftrace_max_lock); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 721 |  | 
| Steven Rostedt (Red Hat) | 2721e72 | 2013-03-12 11:32:32 -0400 | [diff] [blame] | 722 | buf = tr->buffer; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 723 | tr->buffer = max_tr.buffer; | 
|  | 724 | max_tr.buffer = buf; | 
|  | 725 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 726 | __update_max_tr(tr, tsk, cpu); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 727 | arch_spin_unlock(&ftrace_max_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 728 | } | 
|  | 729 |  | 
|  | 730 | /** | 
|  | 731 | * update_max_tr_single - only copy one trace over, and reset the rest | 
|  | 732 | * @tr: tracer | 
|  | 733 | * @tsk: task with the latency | 
|  | 734 | * @cpu: the cpu of the buffer to copy. | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 735 | * | 
|  | 736 | * Flip the trace of a single CPU buffer between the @tr and the max_tr. | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 737 | */ | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 738 | void | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 739 | update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | 
|  | 740 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 741 | int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 742 |  | 
| Steven Rostedt | b8de7bd | 2009-08-31 22:32:27 -0400 | [diff] [blame] | 743 | if (trace_stop_count) | 
|  | 744 | return; | 
|  | 745 |  | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 746 | WARN_ON_ONCE(!irqs_disabled()); | 
| Steven Rostedt (Red Hat) | 2930e04 | 2013-03-26 17:33:00 -0400 | [diff] [blame] | 747 | if (!current_trace->allocated_snapshot) { | 
|  | 748 | /* Only the nop tracer should hit this when disabling */ | 
|  | 749 | WARN_ON_ONCE(current_trace != &nop_trace); | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 750 | return; | 
| Steven Rostedt (Red Hat) | 2930e04 | 2013-03-26 17:33:00 -0400 | [diff] [blame] | 751 | } | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 752 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 753 | arch_spin_lock(&ftrace_max_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 754 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 755 | ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); | 
|  | 756 |  | 
| Steven Rostedt | e8165db | 2009-09-03 19:13:05 -0400 | [diff] [blame] | 757 | if (ret == -EBUSY) { | 
|  | 758 | /* | 
|  | 759 | * We failed to swap the buffer due to a commit taking | 
|  | 760 | * place on this CPU. We fail to record, but we reset | 
|  | 761 | * the max trace buffer (no one writes directly to it) | 
|  | 762 | * and flag that it failed. | 
|  | 763 | */ | 
|  | 764 | trace_array_printk(&max_tr, _THIS_IP_, | 
|  | 765 | "Failed to swap buffers due to commit in progress\n"); | 
|  | 766 | } | 
|  | 767 |  | 
| Steven Rostedt | e8165db | 2009-09-03 19:13:05 -0400 | [diff] [blame] | 768 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 769 |  | 
|  | 770 | __update_max_tr(tr, tsk, cpu); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 771 | arch_spin_unlock(&ftrace_max_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 772 | } | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 773 | #endif /* CONFIG_TRACER_MAX_TRACE */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 774 |  | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 775 | static void default_wait_pipe(struct trace_iterator *iter) | 
|  | 776 | { | 
|  | 777 | DEFINE_WAIT(wait); | 
|  | 778 |  | 
|  | 779 | prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); | 
|  | 780 |  | 
|  | 781 | /* | 
|  | 782 | * The events can happen in critical sections where | 
|  | 783 | * checking a work queue can cause deadlocks. | 
|  | 784 | * After adding a task to the queue, this flag is set | 
|  | 785 | * only to notify events to try to wake up the queue | 
|  | 786 | * using irq_work. | 
|  | 787 | * | 
|  | 788 | * We don't clear it even if the buffer is no longer | 
|  | 789 | * empty. The flag only causes the next event to run | 
|  | 790 | * irq_work to do the work queue wake up. The worst | 
|  | 791 | * that can happen if we race with !trace_empty() is that | 
|  | 792 | * an event will cause an irq_work to try to wake up | 
|  | 793 | * an empty queue. | 
|  | 794 | * | 
|  | 795 | * There's no reason to protect this flag either, as | 
|  | 796 | * the work queue and irq_work logic will do the necessary | 
|  | 797 | * synchronization for the wake ups. The only thing | 
|  | 798 | * that is necessary is that the wake up happens after | 
|  | 799 | * a task has been queued. It's OK for spurious wake ups. | 
|  | 800 | */ | 
|  | 801 | trace_wakeup_needed = true; | 
|  | 802 |  | 
|  | 803 | if (trace_empty(iter)) | 
|  | 804 | schedule(); | 
|  | 805 |  | 
|  | 806 | finish_wait(&trace_wait, &wait); | 
|  | 807 | } | 
|  | 808 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 809 | /** | 
|  | 810 | * register_tracer - register a tracer with the ftrace system. | 
|  | 811 | * @type: the plugin for the tracer | 
|  | 812 | * | 
|  | 813 | * Register a new plugin tracer. | 
|  | 814 | */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 815 | int register_tracer(struct tracer *type) | 
|  | 816 | { | 
|  | 817 | struct tracer *t; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 818 | int ret = 0; | 
|  | 819 |  | 
|  | 820 | if (!type->name) { | 
|  | 821 | pr_info("Tracer must have a name\n"); | 
|  | 822 | return -1; | 
|  | 823 | } | 
|  | 824 |  | 
| Dan Carpenter | 24a461d | 2010-07-10 12:06:44 +0200 | [diff] [blame] | 825 | if (strlen(type->name) >= MAX_TRACER_SIZE) { | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 826 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); | 
|  | 827 | return -1; | 
|  | 828 | } | 
|  | 829 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 830 | mutex_lock(&trace_types_lock); | 
| Ingo Molnar | 86fa2f6 | 2008-11-19 10:00:15 +0100 | [diff] [blame] | 831 |  | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 832 | tracing_selftest_running = true; | 
|  | 833 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 834 | for (t = trace_types; t; t = t->next) { | 
|  | 835 | if (strcmp(type->name, t->name) == 0) { | 
|  | 836 | /* already found */ | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 837 | pr_info("Tracer %s already registered\n", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 838 | type->name); | 
|  | 839 | ret = -1; | 
|  | 840 | goto out; | 
|  | 841 | } | 
|  | 842 | } | 
|  | 843 |  | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 844 | if (!type->set_flag) | 
|  | 845 | type->set_flag = &dummy_set_flag; | 
|  | 846 | if (!type->flags) | 
|  | 847 | type->flags = &dummy_tracer_flags; | 
|  | 848 | else | 
|  | 849 | if (!type->flags->opts) | 
|  | 850 | type->flags->opts = dummy_tracer_opt; | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 851 | if (!type->wait_pipe) | 
|  | 852 | type->wait_pipe = default_wait_pipe; | 
|  | 853 |  | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 854 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 855 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 856 | if (type->selftest && !tracing_selftest_disabled) { | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 857 | struct tracer *saved_tracer = current_trace; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 858 | struct trace_array *tr = &global_trace; | 
| Frederic Weisbecker | ff32504 | 2008-12-04 23:47:35 +0100 | [diff] [blame] | 859 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 860 | /* | 
|  | 861 | * Run a selftest on this tracer. | 
|  | 862 | * Here we reset the trace buffer, and set the current | 
|  | 863 | * tracer to be this tracer. The tracer can then run some | 
|  | 864 | * internal tracing to verify that everything is in order. | 
|  | 865 | * If we fail, we do not register this tracer. | 
|  | 866 | */ | 
| Steven Rostedt | 76f0d07 | 2009-09-04 12:12:39 -0400 | [diff] [blame] | 867 | tracing_reset_online_cpus(tr); | 
| Ingo Molnar | 86fa2f6 | 2008-11-19 10:00:15 +0100 | [diff] [blame] | 868 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 869 | current_trace = type; | 
| Steven Rostedt | 4a0b166 | 2011-03-09 20:09:26 -0500 | [diff] [blame] | 870 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 871 | if (type->use_max_tr) { | 
|  | 872 | /* If we expanded the buffers, make sure the max is expanded too */ | 
|  | 873 | if (ring_buffer_expanded) | 
|  | 874 | ring_buffer_resize(max_tr.buffer, trace_buf_size, | 
|  | 875 | RING_BUFFER_ALL_CPUS); | 
|  | 876 | type->allocated_snapshot = true; | 
|  | 877 | } | 
| Steven Rostedt | 4a0b166 | 2011-03-09 20:09:26 -0500 | [diff] [blame] | 878 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 879 | /* the test is responsible for initializing and enabling */ | 
|  | 880 | pr_info("Testing tracer %s: ", type->name); | 
|  | 881 | ret = type->selftest(type, tr); | 
|  | 882 | /* the test is responsible for resetting too */ | 
|  | 883 | current_trace = saved_tracer; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 884 | if (ret) { | 
|  | 885 | printk(KERN_CONT "FAILED!\n"); | 
| Steven Rostedt | 0be61eb | 2012-06-18 09:28:16 -0400 | [diff] [blame] | 886 | /* Add the warning after printing 'FAILED' */ | 
|  | 887 | WARN_ON(1); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 888 | goto out; | 
|  | 889 | } | 
| Steven Rostedt | 1d4db00 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 890 | /* Only reset on passing, to avoid touching corrupted buffers */ | 
| Steven Rostedt | 76f0d07 | 2009-09-04 12:12:39 -0400 | [diff] [blame] | 891 | tracing_reset_online_cpus(tr); | 
| Ingo Molnar | 86fa2f6 | 2008-11-19 10:00:15 +0100 | [diff] [blame] | 892 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 893 | if (type->use_max_tr) { | 
|  | 894 | type->allocated_snapshot = false; | 
|  | 895 |  | 
|  | 896 | /* Shrink the max buffer again */ | 
|  | 897 | if (ring_buffer_expanded) | 
|  | 898 | ring_buffer_resize(max_tr.buffer, 1, | 
|  | 899 | RING_BUFFER_ALL_CPUS); | 
|  | 900 | } | 
| Steven Rostedt | 4a0b166 | 2011-03-09 20:09:26 -0500 | [diff] [blame] | 901 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 902 | printk(KERN_CONT "PASSED\n"); | 
|  | 903 | } | 
|  | 904 | #endif | 
|  | 905 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 906 | type->next = trace_types; | 
|  | 907 | trace_types = type; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 908 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 909 | out: | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 910 | tracing_selftest_running = false; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 911 | mutex_unlock(&trace_types_lock); | 
|  | 912 |  | 
| Steven Rostedt | dac7494 | 2009-02-05 01:13:38 -0500 | [diff] [blame] | 913 | if (ret || !default_bootup_tracer) | 
|  | 914 | goto out_unlock; | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 915 |  | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 916 | if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) | 
| Steven Rostedt | dac7494 | 2009-02-05 01:13:38 -0500 | [diff] [blame] | 917 | goto out_unlock; | 
|  | 918 |  | 
|  | 919 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | 
|  | 920 | /* Do we want this tracer to start on bootup? */ | 
|  | 921 | tracing_set_tracer(type->name); | 
|  | 922 | default_bootup_tracer = NULL; | 
|  | 923 | /* disable other selftests, since this will break them. */ | 
|  | 924 | tracing_selftest_disabled = 1; | 
|  | 925 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 
|  | 926 | printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", | 
|  | 927 | type->name); | 
|  | 928 | #endif | 
|  | 929 |  | 
|  | 930 | out_unlock: | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 931 | return ret; | 
|  | 932 | } | 
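/*
 * A minimal registration sketch (illustrative only, not part of the
 * original file): a tracer supplies at least a name and an init
 * callback and hands the structure to register_tracer(), typically
 * from an initcall.  The callback names below are made up for the
 * example.
 */
#if 0	/* example only */
static int mytrace_init(struct trace_array *tr)
{
	/* set up any private state and start recording */
	return 0;
}

static void mytrace_reset(struct trace_array *tr)
{
	/* undo what mytrace_init() did */
}

static struct tracer mytrace __read_mostly = {
	.name	= "mytrace",
	.init	= mytrace_init,
	.reset	= mytrace_reset,
};

static __init int init_mytrace(void)
{
	return register_tracer(&mytrace);
}
device_initcall(init_mytrace);
#endif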
|  | 933 |  | 
| Steven Rostedt | f633903 | 2009-09-04 12:35:16 -0400 | [diff] [blame] | 934 | void tracing_reset(struct trace_array *tr, int cpu) | 
|  | 935 | { | 
|  | 936 | struct ring_buffer *buffer = tr->buffer; | 
|  | 937 |  | 
| Hiraku Toyooka | a541641 | 2012-12-19 16:02:34 +0900 | [diff] [blame] | 938 | if (!buffer) | 
|  | 939 | return; | 
|  | 940 |  | 
| Steven Rostedt | f633903 | 2009-09-04 12:35:16 -0400 | [diff] [blame] | 941 | ring_buffer_record_disable(buffer); | 
|  | 942 |  | 
|  | 943 | /* Make sure all commits have finished */ | 
|  | 944 | synchronize_sched(); | 
| Steven Rostedt | 6817968 | 2012-05-08 20:57:53 -0400 | [diff] [blame] | 945 | ring_buffer_reset_cpu(buffer, cpu); | 
| Steven Rostedt | f633903 | 2009-09-04 12:35:16 -0400 | [diff] [blame] | 946 |  | 
|  | 947 | ring_buffer_record_enable(buffer); | 
|  | 948 | } | 
|  | 949 |  | 
| Pekka J Enberg | 213cc06 | 2008-12-19 12:08:39 +0200 | [diff] [blame] | 950 | void tracing_reset_online_cpus(struct trace_array *tr) | 
|  | 951 | { | 
| Steven Rostedt | 621968c | 2009-09-04 12:02:35 -0400 | [diff] [blame] | 952 | struct ring_buffer *buffer = tr->buffer; | 
| Pekka J Enberg | 213cc06 | 2008-12-19 12:08:39 +0200 | [diff] [blame] | 953 | int cpu; | 
|  | 954 |  | 
| Hiraku Toyooka | a541641 | 2012-12-19 16:02:34 +0900 | [diff] [blame] | 955 | if (!buffer) | 
|  | 956 | return; | 
|  | 957 |  | 
| Steven Rostedt | 621968c | 2009-09-04 12:02:35 -0400 | [diff] [blame] | 958 | ring_buffer_record_disable(buffer); | 
|  | 959 |  | 
|  | 960 | /* Make sure all commits have finished */ | 
|  | 961 | synchronize_sched(); | 
|  | 962 |  | 
| Pekka J Enberg | 213cc06 | 2008-12-19 12:08:39 +0200 | [diff] [blame] | 963 | tr->time_start = ftrace_now(tr->cpu); | 
|  | 964 |  | 
|  | 965 | for_each_online_cpu(cpu) | 
| Steven Rostedt | 6817968 | 2012-05-08 20:57:53 -0400 | [diff] [blame] | 966 | ring_buffer_reset_cpu(buffer, cpu); | 
| Steven Rostedt | 621968c | 2009-09-04 12:02:35 -0400 | [diff] [blame] | 967 |  | 
|  | 968 | ring_buffer_record_enable(buffer); | 
| Pekka J Enberg | 213cc06 | 2008-12-19 12:08:39 +0200 | [diff] [blame] | 969 | } | 
|  | 970 |  | 
| Steven Rostedt | 9456f0f | 2009-05-06 21:54:09 -0400 | [diff] [blame] | 971 | void tracing_reset_current(int cpu) | 
|  | 972 | { | 
|  | 973 | tracing_reset(&global_trace, cpu); | 
|  | 974 | } | 
|  | 975 |  | 
|  | 976 | void tracing_reset_current_online_cpus(void) | 
|  | 977 | { | 
|  | 978 | tracing_reset_online_cpus(&global_trace); | 
|  | 979 | } | 
|  | 980 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 981 | #define SAVED_CMDLINES 128 | 
| Thomas Gleixner | 2c7eea4 | 2009-03-18 09:03:19 +0100 | [diff] [blame] | 982 | #define NO_CMDLINE_MAP UINT_MAX | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 983 | static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | 
|  | 984 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; | 
|  | 985 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; | 
|  | 986 | static int cmdline_idx; | 
| Thomas Gleixner | edc35bd | 2009-12-03 12:38:57 +0100 | [diff] [blame] | 987 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 
| Steven Rostedt | 25b0b44 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 988 |  | 
| Steven Rostedt | 25b0b44 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 989 | /* temporarily disable recording */ | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 990 | static atomic_t trace_record_cmdline_disabled __read_mostly; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 991 |  | 
|  | 992 | static void trace_init_cmdlines(void) | 
|  | 993 | { | 
| Thomas Gleixner | 2c7eea4 | 2009-03-18 09:03:19 +0100 | [diff] [blame] | 994 | memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline)); | 
|  | 995 | memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid)); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 996 | cmdline_idx = 0; | 
|  | 997 | } | 
|  | 998 |  | 
| Carsten Emde | b5130b1 | 2009-09-13 01:43:07 +0200 | [diff] [blame] | 999 | int is_tracing_stopped(void) | 
|  | 1000 | { | 
|  | 1001 | return trace_stop_count; | 
|  | 1002 | } | 
|  | 1003 |  | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1004 | /** | 
| Steven Rostedt | 69bb54e | 2008-11-21 12:59:38 -0500 | [diff] [blame] | 1005 | * ftrace_off_permanent - disable all ftrace code permanently | 
|  | 1006 | * | 
|  | 1007 | * This should only be called when a serious anomaly has | 
|  | 1008 | * been detected.  This will turn off function tracing, | 
|  | 1009 | * ring buffers, and other tracing utilities. It takes no | 
|  | 1010 | * locks and can be called from any context. | 
|  | 1011 | */ | 
|  | 1012 | void ftrace_off_permanent(void) | 
|  | 1013 | { | 
|  | 1014 | tracing_disabled = 1; | 
|  | 1015 | ftrace_stop(); | 
|  | 1016 | tracing_off_permanent(); | 
|  | 1017 | } | 
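/*
 * Hedged usage sketch (not part of the original file): built-in debugging
 * code that detects fatal state corruption could shut tracing down for good
 * before reporting.  The surrounding function is a made-up placeholder.
 */
static void example_handle_fatal_corruption(void)
{
	/* No locks taken; safe from any context, including NMI. */
	ftrace_off_permanent();

	/* ... report the anomaly; tracing stays off until reboot ... */
}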
|  | 1018 |  | 
|  | 1019 | /** | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1020 | * tracing_start - quick start of the tracer | 
|  | 1021 | * | 
|  | 1022 | * If tracing is enabled but was stopped by tracing_stop, | 
|  | 1023 | * this will start the tracer back up. | 
|  | 1024 | */ | 
|  | 1025 | void tracing_start(void) | 
|  | 1026 | { | 
|  | 1027 | struct ring_buffer *buffer; | 
|  | 1028 | unsigned long flags; | 
|  | 1029 |  | 
|  | 1030 | if (tracing_disabled) | 
|  | 1031 | return; | 
|  | 1032 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1033 | raw_spin_lock_irqsave(&tracing_start_lock, flags); | 
| Steven Rostedt | b06a830 | 2009-01-22 14:26:15 -0500 | [diff] [blame] | 1034 | if (--trace_stop_count) { | 
|  | 1035 | if (trace_stop_count < 0) { | 
|  | 1036 | /* Someone screwed up their debugging */ | 
|  | 1037 | WARN_ON_ONCE(1); | 
|  | 1038 | trace_stop_count = 0; | 
|  | 1039 | } | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1040 | goto out; | 
|  | 1041 | } | 
|  | 1042 |  | 
| Steven Rostedt | a2f8071 | 2010-03-12 19:56:00 -0500 | [diff] [blame] | 1043 | /* Prevent the buffers from switching */ | 
|  | 1044 | arch_spin_lock(&ftrace_max_lock); | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1045 |  | 
|  | 1046 | buffer = global_trace.buffer; | 
|  | 1047 | if (buffer) | 
|  | 1048 | ring_buffer_record_enable(buffer); | 
|  | 1049 |  | 
|  | 1050 | buffer = max_tr.buffer; | 
|  | 1051 | if (buffer) | 
|  | 1052 | ring_buffer_record_enable(buffer); | 
|  | 1053 |  | 
| Steven Rostedt | a2f8071 | 2010-03-12 19:56:00 -0500 | [diff] [blame] | 1054 | arch_spin_unlock(&ftrace_max_lock); | 
|  | 1055 |  | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1056 | ftrace_start(); | 
|  | 1057 | out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1058 | raw_spin_unlock_irqrestore(&tracing_start_lock, flags); | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1059 | } | 
|  | 1060 |  | 
|  | 1061 | /** | 
|  | 1062 | * tracing_stop - quick stop of the tracer | 
|  | 1063 | * | 
|  | 1064 | * Lightweight way to stop tracing. Use in conjunction with | 
|  | 1065 | * tracing_start. | 
|  | 1066 | */ | 
|  | 1067 | void tracing_stop(void) | 
|  | 1068 | { | 
|  | 1069 | struct ring_buffer *buffer; | 
|  | 1070 | unsigned long flags; | 
|  | 1071 |  | 
|  | 1072 | ftrace_stop(); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1073 | raw_spin_lock_irqsave(&tracing_start_lock, flags); | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1074 | if (trace_stop_count++) | 
|  | 1075 | goto out; | 
|  | 1076 |  | 
| Steven Rostedt | a2f8071 | 2010-03-12 19:56:00 -0500 | [diff] [blame] | 1077 | /* Prevent the buffers from switching */ | 
|  | 1078 | arch_spin_lock(&ftrace_max_lock); | 
|  | 1079 |  | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1080 | buffer = global_trace.buffer; | 
|  | 1081 | if (buffer) | 
|  | 1082 | ring_buffer_record_disable(buffer); | 
|  | 1083 |  | 
|  | 1084 | buffer = max_tr.buffer; | 
|  | 1085 | if (buffer) | 
|  | 1086 | ring_buffer_record_disable(buffer); | 
|  | 1087 |  | 
| Steven Rostedt | a2f8071 | 2010-03-12 19:56:00 -0500 | [diff] [blame] | 1088 | arch_spin_unlock(&ftrace_max_lock); | 
|  | 1089 |  | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1090 | out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1091 | raw_spin_unlock_irqrestore(&tracing_start_lock, flags); | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1092 | } | 
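/*
 * Hedged sketch (not part of the original file): tracing_stop() and
 * tracing_start() are meant to be used as a nested pair from built-in
 * kernel code around work whose events should not reach the ring buffer.
 * The critical section below is a placeholder.
 */
static void example_quiet_section(void)
{
	tracing_stop();		/* bumps trace_stop_count, disables both buffers */

	/* ... work that should not generate trace entries ... */

	tracing_start();	/* drops the count; re-enables on the last caller */
}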
|  | 1093 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1094 | void trace_stop_cmdline_recording(void); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1095 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1096 | static void trace_save_cmdline(struct task_struct *tsk) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1097 | { | 
| Carsten Emde | a635cf0 | 2009-03-18 09:00:41 +0100 | [diff] [blame] | 1098 | unsigned pid, idx; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1099 |  | 
|  | 1100 | if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) | 
|  | 1101 | return; | 
|  | 1102 |  | 
|  | 1103 | /* | 
|  | 1104 | * It's not the end of the world if we don't get | 
|  | 1105 | * the lock, but we also don't want to spin | 
|  | 1106 | * nor do we want to disable interrupts, | 
|  | 1107 | * so if we miss here, then better luck next time. | 
|  | 1108 | */ | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1109 | if (!arch_spin_trylock(&trace_cmdline_lock)) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1110 | return; | 
|  | 1111 |  | 
|  | 1112 | idx = map_pid_to_cmdline[tsk->pid]; | 
| Thomas Gleixner | 2c7eea4 | 2009-03-18 09:03:19 +0100 | [diff] [blame] | 1113 | if (idx == NO_CMDLINE_MAP) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1114 | idx = (cmdline_idx + 1) % SAVED_CMDLINES; | 
|  | 1115 |  | 
| Carsten Emde | a635cf0 | 2009-03-18 09:00:41 +0100 | [diff] [blame] | 1116 | /* | 
|  | 1117 | * Check whether the cmdline buffer at idx has a pid | 
|  | 1118 | * mapped. We are going to overwrite that entry so we | 
|  | 1119 | * need to clear the map_pid_to_cmdline. Otherwise we | 
|  | 1120 | * would read the new comm for the old pid. | 
|  | 1121 | */ | 
|  | 1122 | pid = map_cmdline_to_pid[idx]; | 
|  | 1123 | if (pid != NO_CMDLINE_MAP) | 
|  | 1124 | map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1125 |  | 
| Carsten Emde | a635cf0 | 2009-03-18 09:00:41 +0100 | [diff] [blame] | 1126 | map_cmdline_to_pid[idx] = tsk->pid; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1127 | map_pid_to_cmdline[tsk->pid] = idx; | 
|  | 1128 |  | 
|  | 1129 | cmdline_idx = idx; | 
|  | 1130 | } | 
|  | 1131 |  | 
|  | 1132 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); | 
|  | 1133 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1134 | arch_spin_unlock(&trace_cmdline_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1135 | } | 
|  | 1136 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 1137 | void trace_find_cmdline(int pid, char comm[]) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1138 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1139 | unsigned map; | 
|  | 1140 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 1141 | if (!pid) { | 
|  | 1142 | strcpy(comm, "<idle>"); | 
|  | 1143 | return; | 
|  | 1144 | } | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1145 |  | 
| Steven Rostedt | 74bf407 | 2010-01-25 15:11:53 -0500 | [diff] [blame] | 1146 | if (WARN_ON_ONCE(pid < 0)) { | 
|  | 1147 | strcpy(comm, "<XXX>"); | 
|  | 1148 | return; | 
|  | 1149 | } | 
|  | 1150 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 1151 | if (pid > PID_MAX_DEFAULT) { | 
|  | 1152 | strcpy(comm, "<...>"); | 
|  | 1153 | return; | 
|  | 1154 | } | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1155 |  | 
| Heiko Carstens | 5b6045a | 2009-05-26 17:28:02 +0200 | [diff] [blame] | 1156 | preempt_disable(); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1157 | arch_spin_lock(&trace_cmdline_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1158 | map = map_pid_to_cmdline[pid]; | 
| Thomas Gleixner | 50d8875 | 2009-03-18 08:58:44 +0100 | [diff] [blame] | 1159 | if (map != NO_CMDLINE_MAP) | 
|  | 1160 | strcpy(comm, saved_cmdlines[map]); | 
|  | 1161 | else | 
|  | 1162 | strcpy(comm, "<...>"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1163 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1164 | arch_spin_unlock(&trace_cmdline_lock); | 
| Heiko Carstens | 5b6045a | 2009-05-26 17:28:02 +0200 | [diff] [blame] | 1165 | preempt_enable(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1166 | } | 
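/*
 * Hedged usage sketch (not part of the original file): output code resolves
 * a recorded pid back to a command name with trace_find_cmdline(); unknown
 * or evicted pids come back as "<...>".  TASK_COMM_LEN is from linux/sched.h
 * and trace_seq_printf() from linux/trace_seq.h; the helper name is made up.
 */
static void example_print_comm(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%16s-%-5d ", comm, pid);
}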
|  | 1167 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1168 | void tracing_record_cmdline(struct task_struct *tsk) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1169 | { | 
| Steven Rostedt | 0fb9656 | 2012-05-11 14:25:30 -0400 | [diff] [blame] | 1170 | if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on()) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1171 | return; | 
|  | 1172 |  | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1173 | if (!__this_cpu_read(trace_cmdline_save)) | 
|  | 1174 | return; | 
|  | 1175 |  | 
|  | 1176 | __this_cpu_write(trace_cmdline_save, false); | 
|  | 1177 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1178 | trace_save_cmdline(tsk); | 
|  | 1179 | } | 
|  | 1180 |  | 
| Pekka Paalanen | 45dcd8b | 2008-09-16 21:56:41 +0300 | [diff] [blame] | 1181 | void | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1182 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | 
|  | 1183 | int pc) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1184 | { | 
|  | 1185 | struct task_struct *tsk = current; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1186 |  | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1187 | entry->preempt_count		= pc & 0xff; | 
|  | 1188 | entry->pid			= (tsk) ? tsk->pid : 0; | 
|  | 1189 | entry->flags = | 
| Steven Rostedt | 9244489 | 2008-10-24 09:42:59 -0400 | [diff] [blame] | 1190 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 
| Steven Rostedt | 2e2ca15 | 2008-08-01 12:26:40 -0400 | [diff] [blame] | 1191 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 
| Steven Rostedt | 9244489 | 2008-10-24 09:42:59 -0400 | [diff] [blame] | 1192 | #else | 
|  | 1193 | TRACE_FLAG_IRQS_NOSUPPORT | | 
|  | 1194 | #endif | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1195 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | | 
|  | 1196 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 
|  | 1197 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 
|  | 1198 | } | 
| Frederic Weisbecker | f413cdb | 2009-08-07 01:25:54 +0200 | [diff] [blame] | 1199 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1200 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1201 | struct ring_buffer_event * | 
|  | 1202 | trace_buffer_lock_reserve(struct ring_buffer *buffer, | 
|  | 1203 | int type, | 
|  | 1204 | unsigned long len, | 
|  | 1205 | unsigned long flags, int pc) | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1206 | { | 
|  | 1207 | struct ring_buffer_event *event; | 
|  | 1208 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1209 | event = ring_buffer_lock_reserve(buffer, len); | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1210 | if (event != NULL) { | 
|  | 1211 | struct trace_entry *ent = ring_buffer_event_data(event); | 
|  | 1212 |  | 
|  | 1213 | tracing_generic_entry_update(ent, flags, pc); | 
|  | 1214 | ent->type = type; | 
|  | 1215 | } | 
|  | 1216 |  | 
|  | 1217 | return event; | 
|  | 1218 | } | 
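/*
 * Hedged sketch (not part of the original file) of the reserve -> fill ->
 * commit pattern that callers of trace_buffer_lock_reserve() follow;
 * trace_function() further below is the canonical in-tree user.  TRACE_FN
 * and struct ftrace_entry are real types from trace.h, reused here purely
 * for illustration.
 */
static void example_record_fn(struct ring_buffer *buffer,
			      unsigned long ip, unsigned long parent_ip,
			      unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* Reserve space; NULL means the buffer is full or disabled. */
	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;

	/* Fill in the payload behind the common trace_entry header. */
	entry		 = ring_buffer_event_data(event);
	entry->ip	 = ip;
	entry->parent_ip = parent_ip;

	/* Make the entry visible to readers (and request a reader wakeup). */
	__buffer_unlock_commit(buffer, event);
}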
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1219 |  | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1220 | void | 
|  | 1221 | __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) | 
|  | 1222 | { | 
|  | 1223 | __this_cpu_write(trace_cmdline_save, true); | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 1224 | if (trace_wakeup_needed) { | 
|  | 1225 | trace_wakeup_needed = false; | 
|  | 1226 | /* irq_work_queue() supplies its own memory barriers */ | 
|  | 1227 | irq_work_queue(&trace_work_wakeup); | 
|  | 1228 | } | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1229 | ring_buffer_unlock_commit(buffer, event); | 
|  | 1230 | } | 
|  | 1231 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1232 | static inline void | 
|  | 1233 | __trace_buffer_unlock_commit(struct ring_buffer *buffer, | 
|  | 1234 | struct ring_buffer_event *event, | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 1235 | unsigned long flags, int pc) | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1236 | { | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1237 | __buffer_unlock_commit(buffer, event); | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1238 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1239 | ftrace_trace_stack(buffer, flags, 6, pc); | 
|  | 1240 | ftrace_trace_userstack(buffer, flags, pc); | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1241 | } | 
|  | 1242 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1243 | void trace_buffer_unlock_commit(struct ring_buffer *buffer, | 
|  | 1244 | struct ring_buffer_event *event, | 
|  | 1245 | unsigned long flags, int pc) | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1246 | { | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 1247 | __trace_buffer_unlock_commit(buffer, event, flags, pc); | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1248 | } | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 1249 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1250 |  | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1251 | struct ring_buffer_event * | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1252 | trace_current_buffer_lock_reserve(struct ring_buffer **current_rb, | 
|  | 1253 | int type, unsigned long len, | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1254 | unsigned long flags, int pc) | 
|  | 1255 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1256 | *current_rb = global_trace.buffer; | 
|  | 1257 | return trace_buffer_lock_reserve(*current_rb, | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1258 | type, len, flags, pc); | 
|  | 1259 | } | 
| Steven Rostedt | 94487d6 | 2009-05-05 19:22:53 -0400 | [diff] [blame] | 1260 | EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1261 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1262 | void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, | 
|  | 1263 | struct ring_buffer_event *event, | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1264 | unsigned long flags, int pc) | 
|  | 1265 | { | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 1266 | __trace_buffer_unlock_commit(buffer, event, flags, pc); | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1267 | } | 
| Steven Rostedt | 94487d6 | 2009-05-05 19:22:53 -0400 | [diff] [blame] | 1268 | EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit); | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1269 |  | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 1270 | void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer, | 
|  | 1271 | struct ring_buffer_event *event, | 
|  | 1272 | unsigned long flags, int pc, | 
|  | 1273 | struct pt_regs *regs) | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1274 | { | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1275 | __buffer_unlock_commit(buffer, event); | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1276 |  | 
|  | 1277 | ftrace_trace_stack_regs(buffer, flags, 0, pc, regs); | 
|  | 1278 | ftrace_trace_userstack(buffer, flags, pc); | 
|  | 1279 | } | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 1280 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs); | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1281 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1282 | void trace_current_buffer_discard_commit(struct ring_buffer *buffer, | 
|  | 1283 | struct ring_buffer_event *event) | 
| Steven Rostedt | 77d9f46 | 2009-04-02 01:16:59 -0400 | [diff] [blame] | 1284 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1285 | ring_buffer_discard_commit(buffer, event); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1286 | } | 
| Steven Rostedt | 12acd47 | 2009-04-17 16:01:56 -0400 | [diff] [blame] | 1287 | EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1288 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1289 | void | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1290 | trace_function(struct trace_array *tr, | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1291 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 
|  | 1292 | int pc) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1293 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1294 | struct ftrace_event_call *call = &event_function; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1295 | struct ring_buffer *buffer = tr->buffer; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1296 | struct ring_buffer_event *event; | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1297 | struct ftrace_entry *entry; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1298 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1299 | /* If we are reading the ring buffer, don't trace */ | 
| Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1300 | if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1301 | return; | 
|  | 1302 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1303 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1304 | flags, pc); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1305 | if (!event) | 
|  | 1306 | return; | 
|  | 1307 | entry	= ring_buffer_event_data(event); | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1308 | entry->ip			= ip; | 
|  | 1309 | entry->parent_ip		= parent_ip; | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1310 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1311 | if (!filter_check_discard(call, entry, buffer, event)) | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1312 | __buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1313 | } | 
|  | 1314 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1315 | void | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1316 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1317 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 
|  | 1318 | int pc) | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1319 | { | 
|  | 1320 | if (likely(!atomic_read(&data->disabled))) | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1321 | trace_function(tr, ip, parent_ip, flags, pc); | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1322 | } | 
|  | 1323 |  | 
| Frederic Weisbecker | c0a0d0d | 2009-07-29 17:51:13 +0200 | [diff] [blame] | 1324 | #ifdef CONFIG_STACKTRACE | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1325 |  | 
|  | 1326 | #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) | 
|  | 1327 | struct ftrace_stack { | 
|  | 1328 | unsigned long		calls[FTRACE_STACK_MAX_ENTRIES]; | 
|  | 1329 | }; | 
|  | 1330 |  | 
|  | 1331 | static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); | 
|  | 1332 | static DEFINE_PER_CPU(int, ftrace_stack_reserve); | 
|  | 1333 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1334 | static void __ftrace_trace_stack(struct ring_buffer *buffer, | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1335 | unsigned long flags, | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1336 | int skip, int pc, struct pt_regs *regs) | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1337 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1338 | struct ftrace_event_call *call = &event_kernel_stack; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1339 | struct ring_buffer_event *event; | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1340 | struct stack_entry *entry; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1341 | struct stack_trace trace; | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1342 | int use_stack; | 
|  | 1343 | int size = FTRACE_STACK_ENTRIES; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1344 |  | 
|  | 1345 | trace.nr_entries	= 0; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1346 | trace.skip		= skip; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1347 |  | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1348 | /* | 
|  | 1349 | * Since events can happen in NMIs there's no safe way to | 
|  | 1350 | * use the per cpu ftrace_stacks. We reserve it and if an interrupt | 
|  | 1351 | * or NMI comes in, it will just have to use the default | 
|  | 1352 | * FTRACE_STACK_SIZE. | 
|  | 1353 | */ | 
|  | 1354 | preempt_disable_notrace(); | 
|  | 1355 |  | 
| Shan Wei | 8214652 | 2012-11-19 13:21:01 +0800 | [diff] [blame] | 1356 | use_stack = __this_cpu_inc_return(ftrace_stack_reserve); | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1357 | /* | 
|  | 1358 | * We don't need any atomic variables, just a barrier. | 
|  | 1359 | * If an interrupt comes in, we don't care, because it would | 
|  | 1360 | * have exited and put the counter back to what we want. | 
|  | 1361 | * We just need a barrier to keep gcc from moving things | 
|  | 1362 | * around. | 
|  | 1363 | */ | 
|  | 1364 | barrier(); | 
|  | 1365 | if (use_stack == 1) { | 
|  | 1366 | trace.entries		= &__get_cpu_var(ftrace_stack).calls[0]; | 
|  | 1367 | trace.max_entries	= FTRACE_STACK_MAX_ENTRIES; | 
|  | 1368 |  | 
|  | 1369 | if (regs) | 
|  | 1370 | save_stack_trace_regs(regs, &trace); | 
|  | 1371 | else | 
|  | 1372 | save_stack_trace(&trace); | 
|  | 1373 |  | 
|  | 1374 | if (trace.nr_entries > size) | 
|  | 1375 | size = trace.nr_entries; | 
|  | 1376 | } else | 
|  | 1377 | /* From now on, use_stack is a boolean */ | 
|  | 1378 | use_stack = 0; | 
|  | 1379 |  | 
|  | 1380 | size *= sizeof(unsigned long); | 
|  | 1381 |  | 
|  | 1382 | event = trace_buffer_lock_reserve(buffer, TRACE_STACK, | 
|  | 1383 | sizeof(*entry) + size, flags, pc); | 
|  | 1384 | if (!event) | 
|  | 1385 | goto out; | 
|  | 1386 | entry = ring_buffer_event_data(event); | 
|  | 1387 |  | 
|  | 1388 | memset(&entry->caller, 0, size); | 
|  | 1389 |  | 
|  | 1390 | if (use_stack) | 
|  | 1391 | memcpy(&entry->caller, trace.entries, | 
|  | 1392 | trace.nr_entries * sizeof(unsigned long)); | 
|  | 1393 | else { | 
|  | 1394 | trace.max_entries	= FTRACE_STACK_ENTRIES; | 
|  | 1395 | trace.entries		= entry->caller; | 
|  | 1396 | if (regs) | 
|  | 1397 | save_stack_trace_regs(regs, &trace); | 
|  | 1398 | else | 
|  | 1399 | save_stack_trace(&trace); | 
|  | 1400 | } | 
|  | 1401 |  | 
|  | 1402 | entry->size = trace.nr_entries; | 
|  | 1403 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1404 | if (!filter_check_discard(call, entry, buffer, event)) | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1405 | __buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1406 |  | 
|  | 1407 | out: | 
|  | 1408 | /* Again, don't let gcc optimize things here */ | 
|  | 1409 | barrier(); | 
| Shan Wei | 8214652 | 2012-11-19 13:21:01 +0800 | [diff] [blame] | 1410 | __this_cpu_dec(ftrace_stack_reserve); | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1411 | preempt_enable_notrace(); | 
|  | 1412 |  | 
| Ingo Molnar | f0a920d | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1413 | } | 
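/*
 * Hedged sketch (not part of the original file) of the per-cpu reservation
 * trick used by __ftrace_trace_stack() above: a plain per-cpu counter plus
 * compiler barriers decide whether the caller owns the large per-cpu
 * scratch area or must fall back to a smaller on-stack buffer.  The names
 * below are hypothetical.
 */
static DEFINE_PER_CPU(int, example_scratch_reserve);

static void example_use_scratch(void)
{
	int depth;

	preempt_disable_notrace();
	depth = __this_cpu_inc_return(example_scratch_reserve);
	barrier();	/* keep the compiler from moving work across the claim */

	if (depth == 1) {
		/* first user on this CPU: the per-cpu scratch area is ours */
	} else {
		/* nested irq/NMI user: use a small on-stack buffer instead */
	}

	barrier();
	__this_cpu_dec(example_scratch_reserve);
	preempt_enable_notrace();
}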
|  | 1414 |  | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1415 | void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, | 
|  | 1416 | int skip, int pc, struct pt_regs *regs) | 
|  | 1417 | { | 
|  | 1418 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 
|  | 1419 | return; | 
|  | 1420 |  | 
|  | 1421 | __ftrace_trace_stack(buffer, flags, skip, pc, regs); | 
|  | 1422 | } | 
|  | 1423 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1424 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, | 
|  | 1425 | int skip, int pc) | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1426 | { | 
|  | 1427 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 
|  | 1428 | return; | 
|  | 1429 |  | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1430 | __ftrace_trace_stack(buffer, flags, skip, pc, NULL); | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1431 | } | 
|  | 1432 |  | 
| Frederic Weisbecker | c0a0d0d | 2009-07-29 17:51:13 +0200 | [diff] [blame] | 1433 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 
|  | 1434 | int pc) | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1435 | { | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1436 | __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL); | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1437 | } | 
|  | 1438 |  | 
| Steven Rostedt | 0388938 | 2009-12-11 09:48:22 -0500 | [diff] [blame] | 1439 | /** | 
|  | 1440 | * trace_dump_stack - record a stack back trace in the trace buffer | 
|  | 1441 | */ | 
|  | 1442 | void trace_dump_stack(void) | 
|  | 1443 | { | 
|  | 1444 | unsigned long flags; | 
|  | 1445 |  | 
|  | 1446 | if (tracing_disabled || tracing_selftest_running) | 
| Steven Rostedt | e36c545 | 2009-12-14 15:58:33 -0500 | [diff] [blame] | 1447 | return; | 
| Steven Rostedt | 0388938 | 2009-12-11 09:48:22 -0500 | [diff] [blame] | 1448 |  | 
|  | 1449 | local_save_flags(flags); | 
|  | 1450 |  | 
|  | 1451 | /* skipping 3 frames seems to get us to the caller of this function */ | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1452 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL); | 
| Steven Rostedt | 0388938 | 2009-12-11 09:48:22 -0500 | [diff] [blame] | 1453 | } | 
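/*
 * Hedged usage sketch (not part of the original file): trace_dump_stack()
 * can be dropped into a suspect code path to record the current kernel
 * stack as a TRACE_STACK entry, visible later in the "trace" file.  The
 * surrounding function is a placeholder.
 */
static void example_suspicious_path(void)
{
	/* ... something unexpected happened ... */
	trace_dump_stack();	/* records the backtrace of this caller */
}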
|  | 1454 |  | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1455 | static DEFINE_PER_CPU(int, user_stack_count); | 
|  | 1456 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1457 | void | 
|  | 1458 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1459 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1460 | struct ftrace_event_call *call = &event_user_stack; | 
| Török Edwin | 8d7c6a9 | 2008-11-23 12:39:06 +0200 | [diff] [blame] | 1461 | struct ring_buffer_event *event; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1462 | struct userstack_entry *entry; | 
|  | 1463 | struct stack_trace trace; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1464 |  | 
|  | 1465 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 
|  | 1466 | return; | 
|  | 1467 |  | 
| Steven Rostedt | b634587 | 2010-03-12 20:03:30 -0500 | [diff] [blame] | 1468 | /* | 
|  | 1469 | * NMIs cannot handle page faults, even with fixups. | 
|  | 1470 | * Saving the user stack can (and often does) fault. | 
|  | 1471 | */ | 
|  | 1472 | if (unlikely(in_nmi())) | 
|  | 1473 | return; | 
|  | 1474 |  | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1475 | /* | 
|  | 1476 | * prevent recursion, since the user stack tracing may | 
|  | 1477 | * trigger other kernel events. | 
|  | 1478 | */ | 
|  | 1479 | preempt_disable(); | 
|  | 1480 | if (__this_cpu_read(user_stack_count)) | 
|  | 1481 | goto out; | 
|  | 1482 |  | 
|  | 1483 | __this_cpu_inc(user_stack_count); | 
|  | 1484 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1485 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1486 | sizeof(*entry), flags, pc); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1487 | if (!event) | 
| Li Zefan | 1dbd195 | 2010-12-09 15:47:56 +0800 | [diff] [blame] | 1488 | goto out_drop_count; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1489 | entry	= ring_buffer_event_data(event); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1490 |  | 
| Steven Rostedt | 48659d3 | 2009-09-11 11:36:23 -0400 | [diff] [blame] | 1491 | entry->tgid		= current->tgid; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1492 | memset(&entry->caller, 0, sizeof(entry->caller)); | 
|  | 1493 |  | 
|  | 1494 | trace.nr_entries	= 0; | 
|  | 1495 | trace.max_entries	= FTRACE_STACK_ENTRIES; | 
|  | 1496 | trace.skip		= 0; | 
|  | 1497 | trace.entries		= entry->caller; | 
|  | 1498 |  | 
|  | 1499 | save_stack_trace_user(&trace); | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1500 | if (!filter_check_discard(call, entry, buffer, event)) | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1501 | __buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1502 |  | 
| Li Zefan | 1dbd195 | 2010-12-09 15:47:56 +0800 | [diff] [blame] | 1503 | out_drop_count: | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1504 | __this_cpu_dec(user_stack_count); | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1505 | out: | 
|  | 1506 | preempt_enable(); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1507 | } | 
|  | 1508 |  | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 1509 | #ifdef UNUSED | 
|  | 1510 | static void __trace_userstack(struct trace_array *tr, unsigned long flags) | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1511 | { | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1512 | ftrace_trace_userstack(tr, flags, preempt_count()); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1513 | } | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 1514 | #endif /* UNUSED */ | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1515 |  | 
| Frederic Weisbecker | c0a0d0d | 2009-07-29 17:51:13 +0200 | [diff] [blame] | 1516 | #endif /* CONFIG_STACKTRACE */ | 
|  | 1517 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1518 | /* created for use with alloc_percpu */ | 
|  | 1519 | struct trace_buffer_struct { | 
|  | 1520 | char buffer[TRACE_BUF_SIZE]; | 
|  | 1521 | }; | 
|  | 1522 |  | 
|  | 1523 | static struct trace_buffer_struct *trace_percpu_buffer; | 
|  | 1524 | static struct trace_buffer_struct *trace_percpu_sirq_buffer; | 
|  | 1525 | static struct trace_buffer_struct *trace_percpu_irq_buffer; | 
|  | 1526 | static struct trace_buffer_struct *trace_percpu_nmi_buffer; | 
|  | 1527 |  | 
|  | 1528 | /* | 
|  | 1529 | * The buffer used is dependent on the context. There is a per cpu | 
|  | 1530 | * buffer for normal context, softirq context, hard irq context and | 
|  | 1531 | * for NMI context. This allows for lockless recording. | 
|  | 1532 | * | 
|  | 1533 | * Note, if the buffers failed to be allocated, then this returns NULL | 
|  | 1534 | */ | 
|  | 1535 | static char *get_trace_buf(void) | 
|  | 1536 | { | 
|  | 1537 | struct trace_buffer_struct *percpu_buffer; | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1538 |  | 
|  | 1539 | /* | 
|  | 1540 | * If we have allocated per cpu buffers, then we do not | 
|  | 1541 | * need to do any locking. | 
|  | 1542 | */ | 
|  | 1543 | if (in_nmi()) | 
|  | 1544 | percpu_buffer = trace_percpu_nmi_buffer; | 
|  | 1545 | else if (in_irq()) | 
|  | 1546 | percpu_buffer = trace_percpu_irq_buffer; | 
|  | 1547 | else if (in_softirq()) | 
|  | 1548 | percpu_buffer = trace_percpu_sirq_buffer; | 
|  | 1549 | else | 
|  | 1550 | percpu_buffer = trace_percpu_buffer; | 
|  | 1551 |  | 
|  | 1552 | if (!percpu_buffer) | 
|  | 1553 | return NULL; | 
|  | 1554 |  | 
| Shan Wei | d8a0349 | 2012-11-13 09:53:04 +0800 | [diff] [blame] | 1555 | return this_cpu_ptr(&percpu_buffer->buffer[0]); | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1556 | } | 
|  | 1557 |  | 
|  | 1558 | static int alloc_percpu_trace_buffer(void) | 
|  | 1559 | { | 
|  | 1560 | struct trace_buffer_struct *buffers; | 
|  | 1561 | struct trace_buffer_struct *sirq_buffers; | 
|  | 1562 | struct trace_buffer_struct *irq_buffers; | 
|  | 1563 | struct trace_buffer_struct *nmi_buffers; | 
|  | 1564 |  | 
|  | 1565 | buffers = alloc_percpu(struct trace_buffer_struct); | 
|  | 1566 | if (!buffers) | 
|  | 1567 | goto err_warn; | 
|  | 1568 |  | 
|  | 1569 | sirq_buffers = alloc_percpu(struct trace_buffer_struct); | 
|  | 1570 | if (!sirq_buffers) | 
|  | 1571 | goto err_sirq; | 
|  | 1572 |  | 
|  | 1573 | irq_buffers = alloc_percpu(struct trace_buffer_struct); | 
|  | 1574 | if (!irq_buffers) | 
|  | 1575 | goto err_irq; | 
|  | 1576 |  | 
|  | 1577 | nmi_buffers = alloc_percpu(struct trace_buffer_struct); | 
|  | 1578 | if (!nmi_buffers) | 
|  | 1579 | goto err_nmi; | 
|  | 1580 |  | 
|  | 1581 | trace_percpu_buffer = buffers; | 
|  | 1582 | trace_percpu_sirq_buffer = sirq_buffers; | 
|  | 1583 | trace_percpu_irq_buffer = irq_buffers; | 
|  | 1584 | trace_percpu_nmi_buffer = nmi_buffers; | 
|  | 1585 |  | 
|  | 1586 | return 0; | 
|  | 1587 |  | 
|  | 1588 | err_nmi: | 
|  | 1589 | free_percpu(irq_buffers); | 
|  | 1590 | err_irq: | 
|  | 1591 | free_percpu(sirq_buffers); | 
|  | 1592 | err_sirq: | 
|  | 1593 | free_percpu(buffers); | 
|  | 1594 | err_warn: | 
|  | 1595 | WARN(1, "Could not allocate percpu trace_printk buffer"); | 
|  | 1596 | return -ENOMEM; | 
|  | 1597 | } | 
|  | 1598 |  | 
| Steven Rostedt | 8169883 | 2012-10-11 10:15:05 -0400 | [diff] [blame] | 1599 | static int buffers_allocated; | 
|  | 1600 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1601 | void trace_printk_init_buffers(void) | 
|  | 1602 | { | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1603 | if (buffers_allocated) | 
|  | 1604 | return; | 
|  | 1605 |  | 
|  | 1606 | if (alloc_percpu_trace_buffer()) | 
|  | 1607 | return; | 
|  | 1608 |  | 
|  | 1609 | pr_info("ftrace: Allocated trace_printk buffers\n"); | 
|  | 1610 |  | 
| Steven Rostedt | b382ede6 | 2012-10-10 21:44:34 -0400 | [diff] [blame] | 1611 | /* Expand the buffers to the set size */ | 
|  | 1612 | tracing_update_buffers(); | 
|  | 1613 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1614 | buffers_allocated = 1; | 
| Steven Rostedt | 8169883 | 2012-10-11 10:15:05 -0400 | [diff] [blame] | 1615 |  | 
|  | 1616 | /* | 
|  | 1617 | * trace_printk_init_buffers() can be called by modules. | 
|  | 1618 | * If that happens, then we need to start cmdline recording | 
|  | 1619 | * directly here. If the global_trace.buffer is already | 
|  | 1620 | * allocated here, then this was called by module code. | 
|  | 1621 | */ | 
|  | 1622 | if (global_trace.buffer) | 
|  | 1623 | tracing_start_cmdline_record(); | 
|  | 1624 | } | 
|  | 1625 |  | 
|  | 1626 | void trace_printk_start_comm(void) | 
|  | 1627 | { | 
|  | 1628 | /* Start tracing comms if trace printk is set */ | 
|  | 1629 | if (!buffers_allocated) | 
|  | 1630 | return; | 
|  | 1631 | tracing_start_cmdline_record(); | 
|  | 1632 | } | 
|  | 1633 |  | 
|  | 1634 | static void trace_printk_start_stop_comm(int enabled) | 
|  | 1635 | { | 
|  | 1636 | if (!buffers_allocated) | 
|  | 1637 | return; | 
|  | 1638 |  | 
|  | 1639 | if (enabled) | 
|  | 1640 | tracing_start_cmdline_record(); | 
|  | 1641 | else | 
|  | 1642 | tracing_stop_cmdline_record(); | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1643 | } | 
|  | 1644 |  | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1645 | /** | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1646 | * trace_vbprintk - write a binary message to the tracing buffer | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1647 | * | 
|  | 1648 | */ | 
| Steven Rostedt | 40ce74f | 2009-03-19 14:03:53 -0400 | [diff] [blame] | 1649 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1650 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1651 | struct ftrace_event_call *call = &event_bprint; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1652 | struct ring_buffer_event *event; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1653 | struct ring_buffer *buffer; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1654 | struct trace_array *tr = &global_trace; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1655 | struct bprint_entry *entry; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1656 | unsigned long flags; | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1657 | char *tbuffer; | 
|  | 1658 | int len = 0, size, pc; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1659 |  | 
|  | 1660 | if (unlikely(tracing_selftest_running || tracing_disabled)) | 
|  | 1661 | return 0; | 
|  | 1662 |  | 
|  | 1663 | /* Don't pollute graph traces with trace_vprintk internals */ | 
|  | 1664 | pause_graph_tracing(); | 
|  | 1665 |  | 
|  | 1666 | pc = preempt_count(); | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 1667 | preempt_disable_notrace(); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1668 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1669 | tbuffer = get_trace_buf(); | 
|  | 1670 | if (!tbuffer) { | 
|  | 1671 | len = 0; | 
|  | 1672 | goto out; | 
|  | 1673 | } | 
|  | 1674 |  | 
|  | 1675 | len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); | 
|  | 1676 |  | 
|  | 1677 | if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1678 | goto out; | 
|  | 1679 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1680 | local_save_flags(flags); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1681 | size = sizeof(*entry) + sizeof(u32) * len; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1682 | buffer = tr->buffer; | 
|  | 1683 | event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, | 
|  | 1684 | flags, pc); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1685 | if (!event) | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1686 | goto out; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1687 | entry = ring_buffer_event_data(event); | 
|  | 1688 | entry->ip			= ip; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1689 | entry->fmt			= fmt; | 
|  | 1690 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1691 | memcpy(entry->buf, tbuffer, sizeof(u32) * len); | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1692 | if (!filter_check_discard(call, entry, buffer, event)) { | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1693 | __buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1694 | ftrace_trace_stack(buffer, flags, 6, pc); | 
|  | 1695 | } | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1696 |  | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1697 | out: | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 1698 | preempt_enable_notrace(); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1699 | unpause_graph_tracing(); | 
|  | 1700 |  | 
|  | 1701 | return len; | 
|  | 1702 | } | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1703 | EXPORT_SYMBOL_GPL(trace_vbprintk); | 
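/*
 * Hedged usage sketch (not part of the original file): the usual entry point
 * is the trace_printk() macro from linux/kernel.h, which for constant format
 * strings ends up in trace_vbprintk() above, storing only the format pointer
 * and the binary arguments.  The function and values here are placeholders.
 */
static void example_debug_event(int cpu, u64 delta)
{
	trace_printk("cpu %d saw a latency delta of %llu ns\n",
		     cpu, (unsigned long long)delta);
}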
|  | 1704 |  | 
| Steven Rostedt | 659372d | 2009-09-03 19:11:07 -0400 | [diff] [blame] | 1705 | int trace_array_printk(struct trace_array *tr, | 
|  | 1706 | unsigned long ip, const char *fmt, ...) | 
|  | 1707 | { | 
|  | 1708 | int ret; | 
|  | 1709 | va_list ap; | 
|  | 1710 |  | 
|  | 1711 | if (!(trace_flags & TRACE_ITER_PRINTK)) | 
|  | 1712 | return 0; | 
|  | 1713 |  | 
|  | 1714 | va_start(ap, fmt); | 
|  | 1715 | ret = trace_array_vprintk(tr, ip, fmt, ap); | 
|  | 1716 | va_end(ap); | 
|  | 1717 | return ret; | 
|  | 1718 | } | 
|  | 1719 |  | 
|  | 1720 | int trace_array_vprintk(struct trace_array *tr, | 
|  | 1721 | unsigned long ip, const char *fmt, va_list args) | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1722 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1723 | struct ftrace_event_call *call = &event_print; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1724 | struct ring_buffer_event *event; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1725 | struct ring_buffer *buffer; | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1726 | int len = 0, size, pc; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1727 | struct print_entry *entry; | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1728 | unsigned long flags; | 
|  | 1729 | char *tbuffer; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1730 |  | 
|  | 1731 | if (tracing_disabled || tracing_selftest_running) | 
|  | 1732 | return 0; | 
|  | 1733 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1734 | /* Don't pollute graph traces with trace_vprintk internals */ | 
|  | 1735 | pause_graph_tracing(); | 
|  | 1736 |  | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1737 | pc = preempt_count(); | 
|  | 1738 | preempt_disable_notrace(); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1739 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1740 |  | 
|  | 1741 | tbuffer = get_trace_buf(); | 
|  | 1742 | if (!tbuffer) { | 
|  | 1743 | len = 0; | 
|  | 1744 | goto out; | 
|  | 1745 | } | 
|  | 1746 |  | 
|  | 1747 | len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); | 
|  | 1748 | if (len > TRACE_BUF_SIZE) | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1749 | goto out; | 
|  | 1750 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1751 | local_save_flags(flags); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1752 | size = sizeof(*entry) + len + 1; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1753 | buffer = tr->buffer; | 
|  | 1754 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1755 | flags, pc); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1756 | if (!event) | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1757 | goto out; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1758 | entry = ring_buffer_event_data(event); | 
| Carsten Emde | c13d2f7 | 2009-11-16 20:56:13 +0100 | [diff] [blame] | 1759 | entry->ip = ip; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1760 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1761 | memcpy(&entry->buf, tbuffer, len); | 
| Carsten Emde | c13d2f7 | 2009-11-16 20:56:13 +0100 | [diff] [blame] | 1762 | entry->buf[len] = '\0'; | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1763 | if (!filter_check_discard(call, entry, buffer, event)) { | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 1764 | __buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1765 | ftrace_trace_stack(buffer, flags, 6, pc); | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1766 | } | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1767 | out: | 
|  | 1768 | preempt_enable_notrace(); | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 1769 | unpause_graph_tracing(); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1770 |  | 
|  | 1771 | return len; | 
|  | 1772 | } | 
| Steven Rostedt | 659372d | 2009-09-03 19:11:07 -0400 | [diff] [blame] | 1773 |  | 
|  | 1774 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 
|  | 1775 | { | 
| Steven Rostedt | a813a15 | 2009-10-09 01:41:35 -0400 | [diff] [blame] | 1776 | return trace_array_vprintk(&global_trace, ip, fmt, args); | 
| Steven Rostedt | 659372d | 2009-09-03 19:11:07 -0400 | [diff] [blame] | 1777 | } | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1778 | EXPORT_SYMBOL_GPL(trace_vprintk); | 
|  | 1779 |  | 
| Robert Richter | e2ac8ef | 2008-11-12 12:59:32 +0100 | [diff] [blame] | 1780 | static void trace_iterator_increment(struct trace_iterator *iter) | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1781 | { | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 1782 | struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); | 
|  | 1783 |  | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1784 | iter->idx++; | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 1785 | if (buf_iter) | 
|  | 1786 | ring_buffer_read(buf_iter, NULL); | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1787 | } | 
|  | 1788 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1789 | static struct trace_entry * | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1790 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, | 
|  | 1791 | unsigned long *lost_events) | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1792 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1793 | struct ring_buffer_event *event; | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 1794 | struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1795 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1796 | if (buf_iter) | 
|  | 1797 | event = ring_buffer_iter_peek(buf_iter, ts); | 
|  | 1798 | else | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1799 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts, | 
|  | 1800 | lost_events); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1801 |  | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1802 | if (event) { | 
|  | 1803 | iter->ent_size = ring_buffer_event_length(event); | 
|  | 1804 | return ring_buffer_event_data(event); | 
|  | 1805 | } | 
|  | 1806 | iter->ent_size = 0; | 
|  | 1807 | return NULL; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1808 | } | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1809 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1810 | static struct trace_entry * | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1811 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, | 
|  | 1812 | unsigned long *missing_events, u64 *ent_ts) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1813 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1814 | struct ring_buffer *buffer = iter->tr->buffer; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1815 | struct trace_entry *ent, *next = NULL; | 
| Lai Jiangshan | aa27497 | 2010-04-05 17:11:05 +0800 | [diff] [blame] | 1816 | unsigned long lost_events = 0, next_lost = 0; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1817 | int cpu_file = iter->cpu_file; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1818 | u64 next_ts = 0, ts; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1819 | int next_cpu = -1; | 
| Steven Rostedt | 12b5da3 | 2012-03-27 10:43:28 -0400 | [diff] [blame] | 1820 | int next_size = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1821 | int cpu; | 
|  | 1822 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1823 | /* | 
|  | 1824 | * If we are in a per_cpu trace file, don't bother iterating over | 
|  | 1825 | * all cpus; just peek at that cpu directly. | 
|  | 1826 | */ | 
|  | 1827 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | 
|  | 1828 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | 
|  | 1829 | return NULL; | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1830 | ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1831 | if (ent_cpu) | 
|  | 1832 | *ent_cpu = cpu_file; | 
|  | 1833 |  | 
|  | 1834 | return ent; | 
|  | 1835 | } | 
|  | 1836 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 1837 | for_each_tracing_cpu(cpu) { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1838 |  | 
|  | 1839 | if (ring_buffer_empty_cpu(buffer, cpu)) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1840 | continue; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1841 |  | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1842 | ent = peek_next_entry(iter, cpu, &ts, &lost_events); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1843 |  | 
| Ingo Molnar | cdd31cd | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1844 | /* | 
|  | 1845 | * Pick the entry with the smallest timestamp: | 
|  | 1846 | */ | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1847 | if (ent && (!next || ts < next_ts)) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1848 | next = ent; | 
|  | 1849 | next_cpu = cpu; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1850 | next_ts = ts; | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1851 | next_lost = lost_events; | 
| Steven Rostedt | 12b5da3 | 2012-03-27 10:43:28 -0400 | [diff] [blame] | 1852 | next_size = iter->ent_size; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1853 | } | 
|  | 1854 | } | 
|  | 1855 |  | 
| Steven Rostedt | 12b5da3 | 2012-03-27 10:43:28 -0400 | [diff] [blame] | 1856 | iter->ent_size = next_size; | 
|  | 1857 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1858 | if (ent_cpu) | 
|  | 1859 | *ent_cpu = next_cpu; | 
|  | 1860 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1861 | if (ent_ts) | 
|  | 1862 | *ent_ts = next_ts; | 
|  | 1863 |  | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1864 | if (missing_events) | 
|  | 1865 | *missing_events = next_lost; | 
|  | 1866 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1867 | return next; | 
|  | 1868 | } | 
|  | 1869 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1870 | /* Find the next real entry, without updating the iterator itself */ | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1871 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 
|  | 1872 | int *ent_cpu, u64 *ent_ts) | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1873 | { | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1874 | return __find_next_entry(iter, ent_cpu, NULL, ent_ts); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1875 | } | 
| Ingo Molnar | 8c523a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1876 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1877 | /* Find the next real entry, and increment the iterator to the next entry */ | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1878 | void *trace_find_next_entry_inc(struct trace_iterator *iter) | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1879 | { | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1880 | iter->ent = __find_next_entry(iter, &iter->cpu, | 
|  | 1881 | &iter->lost_events, &iter->ts); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1882 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1883 | if (iter->ent) | 
| Robert Richter | e2ac8ef | 2008-11-12 12:59:32 +0100 | [diff] [blame] | 1884 | trace_iterator_increment(iter); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1885 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1886 | return iter->ent ? iter : NULL; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1887 | } | 
|  | 1888 |  | 
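|  |  | /* Consume (remove from the ring buffer) the next entry on the iterator's cpu. */ | 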
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1889 | static void trace_consume(struct trace_iterator *iter) | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1890 | { | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1891 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, | 
|  | 1892 | &iter->lost_events); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1893 | } | 
|  | 1894 |  | 
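|  |  | /* | 
|  |  | * s_next - seq_file next callback for the trace file | 
|  |  | * | 
|  |  | * Advances the iterator until its index reaches *pos. The walk only | 
|  |  | * moves forward; a request to seek backwards returns NULL. | 
|  |  | */ | 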
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1895 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1896 | { | 
|  | 1897 | struct trace_iterator *iter = m->private; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1898 | int i = (int)*pos; | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 1899 | void *ent; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1900 |  | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 1901 | WARN_ON_ONCE(iter->leftover); | 
|  | 1902 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1903 | (*pos)++; | 
|  | 1904 |  | 
|  | 1905 | /* can't go backwards */ | 
|  | 1906 | if (iter->idx > i) | 
|  | 1907 | return NULL; | 
|  | 1908 |  | 
|  | 1909 | if (iter->idx < 0) | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1910 | ent = trace_find_next_entry_inc(iter); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1911 | else | 
|  | 1912 | ent = iter; | 
|  | 1913 |  | 
|  | 1914 | while (ent && iter->idx < i) | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1915 | ent = trace_find_next_entry_inc(iter); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1916 |  | 
|  | 1917 | iter->pos = *pos; | 
|  | 1918 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1919 | return ent; | 
|  | 1920 | } | 
|  | 1921 |  | 
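|  |  | /* | 
|  |  | * tracing_iter_reset - rewind the ring buffer iterator for one cpu | 
|  |  | * | 
|  |  | * Resets the per-cpu buffer iterator and skips entries whose | 
|  |  | * timestamps predate tr->time_start, recording how many were | 
|  |  | * skipped in the per-cpu skipped_entries count. | 
|  |  | */ | 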
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1922 | void tracing_iter_reset(struct trace_iterator *iter, int cpu) | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1923 | { | 
|  | 1924 | struct trace_array *tr = iter->tr; | 
|  | 1925 | struct ring_buffer_event *event; | 
|  | 1926 | struct ring_buffer_iter *buf_iter; | 
|  | 1927 | unsigned long entries = 0; | 
|  | 1928 | u64 ts; | 
|  | 1929 |  | 
|  | 1930 | tr->data[cpu]->skipped_entries = 0; | 
|  | 1931 |  | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 1932 | buf_iter = trace_buffer_iter(iter, cpu); | 
|  | 1933 | if (!buf_iter) | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1934 | return; | 
|  | 1935 |  | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1936 | ring_buffer_iter_reset(buf_iter); | 
|  | 1937 |  | 
|  | 1938 | /* | 
|  | 1939 | * With the max latency tracers we could have the case | 
|  | 1940 | * that a reset never took place on a cpu. This is evident | 
|  | 1941 | * when the timestamp is before the start of the buffer. | 
|  | 1942 | */ | 
|  | 1943 | while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { | 
|  | 1944 | if (ts >= iter->tr->time_start) | 
|  | 1945 | break; | 
|  | 1946 | entries++; | 
|  | 1947 | ring_buffer_read(buf_iter, NULL); | 
|  | 1948 | } | 
|  | 1949 |  | 
|  | 1950 | tr->data[cpu]->skipped_entries = entries; | 
|  | 1951 | } | 
|  | 1952 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1953 | /* | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1954 | * The current tracer is copied to avoid taking a global lock | 
|  | 1955 | * all around. | 
|  | 1956 | */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1957 | static void *s_start(struct seq_file *m, loff_t *pos) | 
|  | 1958 | { | 
|  | 1959 | struct trace_iterator *iter = m->private; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1960 | int cpu_file = iter->cpu_file; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1961 | void *p = NULL; | 
|  | 1962 | loff_t l = 0; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1963 | int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1964 |  | 
| Hiraku Toyooka | 2fd196e | 2012-12-26 11:52:52 +0900 | [diff] [blame] | 1965 | /* | 
|  | 1966 | * copy the tracer to avoid using a global lock all around. | 
|  | 1967 | * iter->trace is a copy of current_trace; the name pointer | 
|  | 1968 | * may be compared instead of using strcmp(), as iter->trace->name | 
|  | 1969 | * will point to the same string as current_trace->name. | 
|  | 1970 | */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1971 | mutex_lock(&trace_types_lock); | 
| Hiraku Toyooka | 2fd196e | 2012-12-26 11:52:52 +0900 | [diff] [blame] | 1972 | if (unlikely(current_trace && iter->trace->name != current_trace->name)) | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1973 | *iter->trace = *current_trace; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1974 | mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1975 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 1976 | if (iter->snapshot && iter->trace->use_max_tr) | 
|  | 1977 | return ERR_PTR(-EBUSY); | 
|  | 1978 |  | 
|  | 1979 | if (!iter->snapshot) | 
|  | 1980 | atomic_inc(&trace_record_cmdline_disabled); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1981 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1982 | if (*pos != iter->pos) { | 
|  | 1983 | iter->ent = NULL; | 
|  | 1984 | iter->cpu = 0; | 
|  | 1985 | iter->idx = -1; | 
|  | 1986 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1987 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 
|  | 1988 | for_each_tracing_cpu(cpu) | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1989 | tracing_iter_reset(iter, cpu); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1990 | } else | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1991 | tracing_iter_reset(iter, cpu_file); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1992 |  | 
| Lai Jiangshan | ac91d85 | 2010-03-02 17:54:50 +0800 | [diff] [blame] | 1993 | iter->leftover = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1994 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 
|  | 1995 | ; | 
|  | 1996 |  | 
|  | 1997 | } else { | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 1998 | /* | 
|  | 1999 | * If we overflowed the seq_file before, then we want | 
|  | 2000 | * to just reuse the trace_seq buffer again. | 
|  | 2001 | */ | 
|  | 2002 | if (iter->leftover) | 
|  | 2003 | p = iter; | 
|  | 2004 | else { | 
|  | 2005 | l = *pos - 1; | 
|  | 2006 | p = s_next(m, p, &l); | 
|  | 2007 | } | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2008 | } | 
|  | 2009 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 2010 | trace_event_read_lock(); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 2011 | trace_access_lock(cpu_file); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2012 | return p; | 
|  | 2013 | } | 
|  | 2014 |  | 
|  | 2015 | static void s_stop(struct seq_file *m, void *p) | 
|  | 2016 | { | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 2017 | struct trace_iterator *iter = m->private; | 
|  | 2018 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 2019 | if (iter->snapshot && iter->trace->use_max_tr) | 
|  | 2020 | return; | 
|  | 2021 |  | 
|  | 2022 | if (!iter->snapshot) | 
|  | 2023 | atomic_dec(&trace_record_cmdline_disabled); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 2024 | trace_access_unlock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 2025 | trace_event_read_unlock(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2026 | } | 
|  | 2027 |  | 
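|  |  | /* | 
|  |  | * get_total_entries - count the entries still present and those written | 
|  |  | * | 
|  |  | * @entries sums what remains in the buffers (minus skipped entries), | 
|  |  | * while @total additionally includes the per-cpu overrun counts. | 
|  |  | */ | 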
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2028 | static void | 
|  | 2029 | get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries) | 
|  | 2030 | { | 
|  | 2031 | unsigned long count; | 
|  | 2032 | int cpu; | 
|  | 2033 |  | 
|  | 2034 | *total = 0; | 
|  | 2035 | *entries = 0; | 
|  | 2036 |  | 
|  | 2037 | for_each_tracing_cpu(cpu) { | 
|  | 2038 | count = ring_buffer_entries_cpu(tr->buffer, cpu); | 
|  | 2039 | /* | 
|  | 2040 | * If this buffer has skipped entries, then we hold all | 
|  | 2041 | * entries for the trace and we need to ignore the | 
|  | 2042 | * ones before the time stamp. | 
|  | 2043 | */ | 
|  | 2044 | if (tr->data[cpu]->skipped_entries) { | 
|  | 2045 | count -= tr->data[cpu]->skipped_entries; | 
|  | 2046 | /* total is the same as the entries */ | 
|  | 2047 | *total += count; | 
|  | 2048 | } else | 
|  | 2049 | *total += count + | 
|  | 2050 | ring_buffer_overrun_cpu(tr->buffer, cpu); | 
|  | 2051 | *entries += count; | 
|  | 2052 | } | 
|  | 2053 | } | 
|  | 2054 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 2055 | static void print_lat_help_header(struct seq_file *m) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2056 | { | 
| Michael Ellerman | a616835 | 2008-08-20 16:36:11 -0700 | [diff] [blame] | 2057 | seq_puts(m, "#                  _------=> CPU#            \n"); | 
|  | 2058 | seq_puts(m, "#                 / _-----=> irqs-off        \n"); | 
|  | 2059 | seq_puts(m, "#                | / _----=> need-resched    \n"); | 
|  | 2060 | seq_puts(m, "#                || / _---=> hardirq/softirq \n"); | 
|  | 2061 | seq_puts(m, "#                ||| / _--=> preempt-depth   \n"); | 
| Steven Rostedt | e6e1e25 | 2011-03-09 10:41:56 -0500 | [diff] [blame] | 2062 | seq_puts(m, "#                |||| /     delay             \n"); | 
|  | 2063 | seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n"); | 
|  | 2064 | seq_puts(m, "#     \\   /      |||||  \\    |   /           \n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2065 | } | 
|  | 2066 |  | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2067 | static void print_event_info(struct trace_array *tr, struct seq_file *m) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2068 | { | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2069 | unsigned long total; | 
|  | 2070 | unsigned long entries; | 
|  | 2071 |  | 
|  | 2072 | get_total_entries(tr, &total, &entries); | 
|  | 2073 | seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n", | 
|  | 2074 | entries, total, num_online_cpus()); | 
|  | 2075 | seq_puts(m, "#\n"); | 
|  | 2076 | } | 
|  | 2077 |  | 
|  | 2078 | static void print_func_help_header(struct trace_array *tr, struct seq_file *m) | 
|  | 2079 | { | 
|  | 2080 | print_event_info(tr, m); | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 2081 | seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"); | 
| Michael Ellerman | a616835 | 2008-08-20 16:36:11 -0700 | [diff] [blame] | 2082 | seq_puts(m, "#              | |       |          |         |\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2083 | } | 
|  | 2084 |  | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2085 | static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m) | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 2086 | { | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2087 | print_event_info(tr, m); | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 2088 | seq_puts(m, "#                              _-----=> irqs-off\n"); | 
|  | 2089 | seq_puts(m, "#                             / _----=> need-resched\n"); | 
|  | 2090 | seq_puts(m, "#                            | / _---=> hardirq/softirq\n"); | 
|  | 2091 | seq_puts(m, "#                            || / _--=> preempt-depth\n"); | 
|  | 2092 | seq_puts(m, "#                            ||| /     delay\n"); | 
|  | 2093 | seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"); | 
|  | 2094 | seq_puts(m, "#              | |       |   ||||       |         |\n"); | 
|  | 2095 | } | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2096 |  | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2097 | void | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2098 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) | 
|  | 2099 | { | 
|  | 2100 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 
|  | 2101 | struct trace_array *tr = iter->tr; | 
|  | 2102 | struct trace_array_cpu *data = tr->data[tr->cpu]; | 
|  | 2103 | struct tracer *type = current_trace; | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2104 | unsigned long entries; | 
|  | 2105 | unsigned long total; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2106 | const char *name = "preemption"; | 
|  | 2107 |  | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 2108 | name = type->name; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2109 |  | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2110 | get_total_entries(tr, &total, &entries); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2111 |  | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2112 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2113 | name, UTS_RELEASE); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2114 | seq_puts(m, "# -----------------------------------" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2115 | "---------------------------------\n"); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2116 | seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2117 | " (M:%s VP:%d, KP:%d, SP:%d HP:%d", | 
| Steven Rostedt | 57f50be | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2118 | nsecs_to_usecs(data->saved_latency), | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2119 | entries, | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 2120 | total, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2121 | tr->cpu, | 
|  | 2122 | #if defined(CONFIG_PREEMPT_NONE) | 
|  | 2123 | "server", | 
|  | 2124 | #elif defined(CONFIG_PREEMPT_VOLUNTARY) | 
|  | 2125 | "desktop", | 
| Steven Rostedt | b5c21b4 | 2008-07-10 20:58:12 -0400 | [diff] [blame] | 2126 | #elif defined(CONFIG_PREEMPT) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2127 | "preempt", | 
|  | 2128 | #else | 
|  | 2129 | "unknown", | 
|  | 2130 | #endif | 
|  | 2131 | /* These are reserved for later use */ | 
|  | 2132 | 0, 0, 0, 0); | 
|  | 2133 | #ifdef CONFIG_SMP | 
|  | 2134 | seq_printf(m, " #P:%d)\n", num_online_cpus()); | 
|  | 2135 | #else | 
|  | 2136 | seq_puts(m, ")\n"); | 
|  | 2137 | #endif | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2138 | seq_puts(m, "#    -----------------\n"); | 
|  | 2139 | seq_printf(m, "#    | task: %.16s-%d " | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2140 | "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", | 
| Eric W. Biederman | d20b92a | 2012-03-13 16:02:19 -0700 | [diff] [blame] | 2141 | data->comm, data->pid, | 
|  | 2142 | from_kuid_munged(seq_user_ns(m), data->uid), data->nice, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2143 | data->policy, data->rt_priority); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2144 | seq_puts(m, "#    -----------------\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2145 |  | 
|  | 2146 | if (data->critical_start) { | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2147 | seq_puts(m, "#  => started at: "); | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2148 | seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); | 
|  | 2149 | trace_print_seq(m, &iter->seq); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2150 | seq_puts(m, "\n#  => ended at:   "); | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2151 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 
|  | 2152 | trace_print_seq(m, &iter->seq); | 
| Steven Rostedt | 8248ac0 | 2009-09-02 12:27:41 -0400 | [diff] [blame] | 2153 | seq_puts(m, "\n#\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2154 | } | 
|  | 2155 |  | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 2156 | seq_puts(m, "#\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2157 | } | 
|  | 2158 |  | 
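|  |  | /* | 
|  |  | * test_cpu_buff_start - annotate the first entry seen from a cpu buffer | 
|  |  | * | 
|  |  | * When buffer annotation is enabled, prints a one-time | 
|  |  | * "CPU %u buffer started" banner the first time output switches to a | 
|  |  | * given cpu, except for cpus with skipped entries. | 
|  |  | */ | 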
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 2159 | static void test_cpu_buff_start(struct trace_iterator *iter) | 
|  | 2160 | { | 
|  | 2161 | struct trace_seq *s = &iter->seq; | 
|  | 2162 |  | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 2163 | if (!(trace_flags & TRACE_ITER_ANNOTATE)) | 
|  | 2164 | return; | 
|  | 2165 |  | 
|  | 2166 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | 
|  | 2167 | return; | 
|  | 2168 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 2169 | if (cpumask_test_cpu(iter->cpu, iter->started)) | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 2170 | return; | 
|  | 2171 |  | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2172 | if (iter->tr->data[iter->cpu]->skipped_entries) | 
|  | 2173 | return; | 
|  | 2174 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 2175 | cpumask_set_cpu(iter->cpu, iter->started); | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 2176 |  | 
|  | 2177 | /* Don't print the "cpu buffer started" banner for the first entry of the trace */ | 
|  | 2178 | if (iter->idx > 1) | 
|  | 2179 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", | 
|  | 2180 | iter->cpu); | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 2181 | } | 
|  | 2182 |  | 
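|  |  | /* | 
|  |  | * print_trace_fmt - default human-readable output for one entry | 
|  |  | * | 
|  |  | * Prints the context info (unless disabled), then hands the entry to | 
|  |  | * its registered trace_event handler, falling back to "Unknown type". | 
|  |  | */ | 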
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2183 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2184 | { | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2185 | struct trace_seq *s = &iter->seq; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2186 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2187 | struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2188 | struct trace_event *event; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2189 |  | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2190 | entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 2191 |  | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 2192 | test_cpu_buff_start(iter); | 
|  | 2193 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2194 | event = ftrace_find_event(entry->type); | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2195 |  | 
|  | 2196 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| Steven Rostedt | 27d48be | 2009-03-04 21:57:29 -0500 | [diff] [blame] | 2197 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 
|  | 2198 | if (!trace_print_lat_context(iter)) | 
|  | 2199 | goto partial; | 
|  | 2200 | } else { | 
|  | 2201 | if (!trace_print_context(iter)) | 
|  | 2202 | goto partial; | 
|  | 2203 | } | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2204 | } | 
|  | 2205 |  | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 2206 | if (event) | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 2207 | return event->funcs->trace(iter, sym_flags, event); | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2208 |  | 
|  | 2209 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 
|  | 2210 | goto partial; | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 2211 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2212 | return TRACE_TYPE_HANDLED; | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2213 | partial: | 
|  | 2214 | return TRACE_TYPE_PARTIAL_LINE; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2215 | } | 
|  | 2216 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2217 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2218 | { | 
|  | 2219 | struct trace_seq *s = &iter->seq; | 
|  | 2220 | struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2221 | struct trace_event *event; | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2222 |  | 
|  | 2223 | entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 2224 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2225 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2226 | if (!trace_seq_printf(s, "%d %d %llu ", | 
|  | 2227 | entry->pid, iter->cpu, iter->ts)) | 
|  | 2228 | goto partial; | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2229 | } | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2230 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2231 | event = ftrace_find_event(entry->type); | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 2232 | if (event) | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 2233 | return event->funcs->raw(iter, 0, event); | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2234 |  | 
|  | 2235 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 
|  | 2236 | goto partial; | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 2237 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2238 | return TRACE_TYPE_HANDLED; | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2239 | partial: | 
|  | 2240 | return TRACE_TYPE_PARTIAL_LINE; | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2241 | } | 
|  | 2242 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2243 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2244 | { | 
|  | 2245 | struct trace_seq *s = &iter->seq; | 
|  | 2246 | unsigned char newline = '\n'; | 
|  | 2247 | struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2248 | struct trace_event *event; | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2249 |  | 
|  | 2250 | entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 2251 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2252 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
|  | 2253 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 
|  | 2254 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 
|  | 2255 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 
|  | 2256 | } | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2257 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2258 | event = ftrace_find_event(entry->type); | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 2259 | if (event) { | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 2260 | enum print_line_t ret = event->funcs->hex(iter, 0, event); | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2261 | if (ret != TRACE_TYPE_HANDLED) | 
|  | 2262 | return ret; | 
|  | 2263 | } | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 2264 |  | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2265 | SEQ_PUT_FIELD_RET(s, newline); | 
|  | 2266 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2267 | return TRACE_TYPE_HANDLED; | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2268 | } | 
|  | 2269 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2270 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2271 | { | 
|  | 2272 | struct trace_seq *s = &iter->seq; | 
|  | 2273 | struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2274 | struct trace_event *event; | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2275 |  | 
|  | 2276 | entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 2277 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2278 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
|  | 2279 | SEQ_PUT_FIELD_RET(s, entry->pid); | 
| Steven Rostedt | 1830b52 | 2009-02-07 19:38:43 -0500 | [diff] [blame] | 2280 | SEQ_PUT_FIELD_RET(s, iter->cpu); | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2281 | SEQ_PUT_FIELD_RET(s, iter->ts); | 
|  | 2282 | } | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2283 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2284 | event = ftrace_find_event(entry->type); | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 2285 | return event ? event->funcs->binary(iter, 0, event) : | 
|  | 2286 | TRACE_TYPE_HANDLED; | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2287 | } | 
|  | 2288 |  | 
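|  |  | /* | 
|  |  | * trace_empty - return 1 if there is nothing left to read | 
|  |  | * | 
|  |  | * Checks either the single cpu selected by the trace file or every | 
|  |  | * tracing cpu, using the buffer iterator where one exists. | 
|  |  | */ | 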
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2289 | int trace_empty(struct trace_iterator *iter) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2290 | { | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 2291 | struct ring_buffer_iter *buf_iter; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2292 | int cpu; | 
|  | 2293 |  | 
| Steven Rostedt | 9aba60f | 2009-03-11 19:52:30 -0400 | [diff] [blame] | 2294 | /* If we are looking at one CPU buffer, only check that one */ | 
|  | 2295 | if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { | 
|  | 2296 | cpu = iter->cpu_file; | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 2297 | buf_iter = trace_buffer_iter(iter, cpu); | 
|  | 2298 | if (buf_iter) { | 
|  | 2299 | if (!ring_buffer_iter_empty(buf_iter)) | 
| Steven Rostedt | 9aba60f | 2009-03-11 19:52:30 -0400 | [diff] [blame] | 2300 | return 0; | 
|  | 2301 | } else { | 
|  | 2302 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 
|  | 2303 | return 0; | 
|  | 2304 | } | 
|  | 2305 | return 1; | 
|  | 2306 | } | 
|  | 2307 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2308 | for_each_tracing_cpu(cpu) { | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 2309 | buf_iter = trace_buffer_iter(iter, cpu); | 
|  | 2310 | if (buf_iter) { | 
|  | 2311 | if (!ring_buffer_iter_empty(buf_iter)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2312 | return 0; | 
|  | 2313 | } else { | 
|  | 2314 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 
|  | 2315 | return 0; | 
|  | 2316 | } | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2317 | } | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2318 |  | 
| Frederic Weisbecker | 797d371 | 2008-09-30 18:13:45 +0200 | [diff] [blame] | 2319 | return 1; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2320 | } | 
|  | 2321 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 2322 | /*  Called with trace_event_read_lock() held. */ | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 2323 | enum print_line_t print_trace_line(struct trace_iterator *iter) | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2324 | { | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2325 | enum print_line_t ret; | 
|  | 2326 |  | 
| Jiri Olsa | ee5e51f | 2011-03-25 12:05:18 +0100 | [diff] [blame] | 2327 | if (iter->lost_events && | 
|  | 2328 | !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | 
|  | 2329 | iter->cpu, iter->lost_events)) | 
|  | 2330 | return TRACE_TYPE_PARTIAL_LINE; | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 2331 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2332 | if (iter->trace && iter->trace->print_line) { | 
|  | 2333 | ret = iter->trace->print_line(iter); | 
|  | 2334 | if (ret != TRACE_TYPE_UNHANDLED) | 
|  | 2335 | return ret; | 
|  | 2336 | } | 
| Thomas Gleixner | 72829bc | 2008-05-23 21:37:28 +0200 | [diff] [blame] | 2337 |  | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 2338 | if (iter->ent->type == TRACE_BPRINT && | 
|  | 2339 | trace_flags & TRACE_ITER_PRINTK && | 
|  | 2340 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 
| Steven Rostedt | 5ef841f | 2009-03-19 12:20:38 -0400 | [diff] [blame] | 2341 | return trace_print_bprintk_msg_only(iter); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 2342 |  | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 2343 | if (iter->ent->type == TRACE_PRINT && | 
|  | 2344 | trace_flags & TRACE_ITER_PRINTK && | 
|  | 2345 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 
| Steven Rostedt | 5ef841f | 2009-03-19 12:20:38 -0400 | [diff] [blame] | 2346 | return trace_print_printk_msg_only(iter); | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 2347 |  | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2348 | if (trace_flags & TRACE_ITER_BIN) | 
|  | 2349 | return print_bin_fmt(iter); | 
|  | 2350 |  | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2351 | if (trace_flags & TRACE_ITER_HEX) | 
|  | 2352 | return print_hex_fmt(iter); | 
|  | 2353 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2354 | if (trace_flags & TRACE_ITER_RAW) | 
|  | 2355 | return print_raw_fmt(iter); | 
|  | 2356 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2357 | return print_trace_fmt(iter); | 
|  | 2358 | } | 
|  | 2359 |  | 
| Jiri Olsa | 7e9a49e | 2011-11-07 16:08:49 +0100 | [diff] [blame] | 2360 | void trace_latency_header(struct seq_file *m) | 
|  | 2361 | { | 
|  | 2362 | struct trace_iterator *iter = m->private; | 
|  | 2363 |  | 
|  | 2364 | /* print nothing if the buffers are empty */ | 
|  | 2365 | if (trace_empty(iter)) | 
|  | 2366 | return; | 
|  | 2367 |  | 
|  | 2368 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) | 
|  | 2369 | print_trace_header(m, iter); | 
|  | 2370 |  | 
|  | 2371 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | 
|  | 2372 | print_lat_help_header(m); | 
|  | 2373 | } | 
|  | 2374 |  | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2375 | void trace_default_header(struct seq_file *m) | 
|  | 2376 | { | 
|  | 2377 | struct trace_iterator *iter = m->private; | 
|  | 2378 |  | 
| Jiri Olsa | f56e7f8 | 2011-06-03 16:58:49 +0200 | [diff] [blame] | 2379 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 
|  | 2380 | return; | 
|  | 2381 |  | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2382 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 
|  | 2383 | /* print nothing if the buffers are empty */ | 
|  | 2384 | if (trace_empty(iter)) | 
|  | 2385 | return; | 
|  | 2386 | print_trace_header(m, iter); | 
|  | 2387 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | 
|  | 2388 | print_lat_help_header(m); | 
|  | 2389 | } else { | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 2390 | if (!(trace_flags & TRACE_ITER_VERBOSE)) { | 
|  | 2391 | if (trace_flags & TRACE_ITER_IRQ_INFO) | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2392 | print_func_help_header_irq(iter->tr, m); | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 2393 | else | 
| Steven Rostedt | 39eaf7e | 2011-11-17 10:35:16 -0500 | [diff] [blame] | 2394 | print_func_help_header(iter->tr, m); | 
| Steven Rostedt | 77271ce | 2011-11-17 09:34:33 -0500 | [diff] [blame] | 2395 | } | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2396 | } | 
|  | 2397 | } | 
|  | 2398 |  | 
| Steven Rostedt | e0a413f | 2011-09-29 21:26:16 -0400 | [diff] [blame] | 2399 | static void test_ftrace_alive(struct seq_file *m) | 
|  | 2400 | { | 
|  | 2401 | if (!ftrace_is_dead()) | 
|  | 2402 | return; | 
|  | 2403 | seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); | 
|  | 2404 | seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n"); | 
|  | 2405 | } | 
|  | 2406 |  | 
| Steven Rostedt (Red Hat) | d8741e2 | 2013-03-05 10:25:16 -0500 | [diff] [blame] | 2407 | #ifdef CONFIG_TRACER_MAX_TRACE | 
|  | 2408 | static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) | 
|  | 2409 | { | 
|  | 2410 | if (iter->trace->allocated_snapshot) | 
|  | 2411 | seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); | 
|  | 2412 | else | 
|  | 2413 | seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); | 
|  | 2414 |  | 
|  | 2415 | seq_printf(m, "# Snapshot commands:\n"); | 
|  | 2416 | seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); | 
|  | 2417 | seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); | 
|  | 2418 | seq_printf(m, "#                      Takes a snapshot of the main buffer.\n"); | 
|  | 2419 | seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n"); | 
|  | 2420 | seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n"); | 
|  | 2421 | seq_printf(m, "#                       is not a '0' or '1')\n"); | 
|  | 2422 | } | 
|  | 2423 | #else | 
|  | 2424 | /* Should never be called */ | 
|  | 2425 | static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } | 
|  | 2426 | #endif | 
|  | 2427 |  | 
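|  |  | /* | 
|  |  | * s_show - seq_file show callback for the trace file | 
|  |  | * | 
|  |  | * Emits the header when there is no current entry, replays output | 
|  |  | * left over from a previous seq_file overflow, or prints the | 
|  |  | * current trace entry. | 
|  |  | */ | 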
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2428 | static int s_show(struct seq_file *m, void *v) | 
|  | 2429 | { | 
|  | 2430 | struct trace_iterator *iter = v; | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 2431 | int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2432 |  | 
|  | 2433 | if (iter->ent == NULL) { | 
|  | 2434 | if (iter->tr) { | 
|  | 2435 | seq_printf(m, "# tracer: %s\n", iter->trace->name); | 
|  | 2436 | seq_puts(m, "#\n"); | 
| Steven Rostedt | e0a413f | 2011-09-29 21:26:16 -0400 | [diff] [blame] | 2437 | test_ftrace_alive(m); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2438 | } | 
| Steven Rostedt (Red Hat) | d8741e2 | 2013-03-05 10:25:16 -0500 | [diff] [blame] | 2439 | if (iter->snapshot && trace_empty(iter)) | 
|  | 2440 | print_snapshot_help(m, iter); | 
|  | 2441 | else if (iter->trace && iter->trace->print_header) | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 2442 | iter->trace->print_header(m); | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2443 | else | 
|  | 2444 | trace_default_header(m); | 
|  | 2445 |  | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 2446 | } else if (iter->leftover) { | 
|  | 2447 | /* | 
|  | 2448 | * If we filled the seq_file buffer earlier, we | 
|  | 2449 | * want to just show it now. | 
|  | 2450 | */ | 
|  | 2451 | ret = trace_print_seq(m, &iter->seq); | 
|  | 2452 |  | 
|  | 2453 | /* ret should this time be zero, but you never know */ | 
|  | 2454 | iter->leftover = ret; | 
|  | 2455 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2456 | } else { | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2457 | print_trace_line(iter); | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 2458 | ret = trace_print_seq(m, &iter->seq); | 
|  | 2459 | /* | 
|  | 2460 | * If we overflow the seq_file buffer, then it will | 
|  | 2461 | * ask us for this data again at start up. | 
|  | 2462 | * Use that instead. | 
|  | 2463 | *  ret is 0 if seq_file write succeeded. | 
|  | 2464 | *        -1 otherwise. | 
|  | 2465 | */ | 
|  | 2466 | iter->leftover = ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2467 | } | 
|  | 2468 |  | 
|  | 2469 | return 0; | 
|  | 2470 | } | 
|  | 2471 |  | 
| James Morris | 88e9d34 | 2009-09-22 16:43:43 -0700 | [diff] [blame] | 2472 | static const struct seq_operations tracer_seq_ops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2473 | .start		= s_start, | 
|  | 2474 | .next		= s_next, | 
|  | 2475 | .stop		= s_stop, | 
|  | 2476 | .show		= s_show, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2477 | }; | 
|  | 2478 |  | 
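|  |  | /* | 
|  |  | * __tracing_open - set up a trace_iterator for reading the trace file | 
|  |  | * | 
|  |  | * Allocates the iterator and its per-cpu buffer iterators, copies the | 
|  |  | * current tracer so concurrent tracer switches cannot race with the | 
|  |  | * reader, stops tracing unless the snapshot file was opened, and | 
|  |  | * prepares ring buffer iterators for the selected cpu(s). | 
|  |  | */ | 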
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 2479 | static struct trace_iterator * | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 2480 | __tracing_open(struct inode *inode, struct file *file, bool snapshot) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2481 | { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2482 | long cpu_file = (long) inode->i_private; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2483 | struct trace_iterator *iter; | 
| Jiri Olsa | 50e18b9 | 2012-04-25 10:23:39 +0200 | [diff] [blame] | 2484 | int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2485 |  | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2486 | if (tracing_disabled) | 
|  | 2487 | return ERR_PTR(-ENODEV); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2488 |  | 
| Jiri Olsa | 50e18b9 | 2012-04-25 10:23:39 +0200 | [diff] [blame] | 2489 | iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2490 | if (!iter) | 
|  | 2491 | return ERR_PTR(-ENOMEM); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2492 |  | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 2493 | iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), | 
|  | 2494 | GFP_KERNEL); | 
| Dan Carpenter | 93574fc | 2012-07-11 09:35:08 +0300 | [diff] [blame] | 2495 | if (!iter->buffer_iter) | 
|  | 2496 | goto release; | 
|  | 2497 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2498 | /* | 
|  | 2499 | * We make a copy of the current tracer to avoid concurrent | 
|  | 2500 | * changes to it while we are reading. | 
|  | 2501 | */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2502 | mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2503 | iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2504 | if (!iter->trace) | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2505 | goto fail; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2506 |  | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 2507 | *iter->trace = *current_trace; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2508 |  | 
| Li Zefan | 79f5599 | 2009-06-15 14:58:26 +0800 | [diff] [blame] | 2509 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 2510 | goto fail; | 
|  | 2511 |  | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 2512 | if (current_trace->print_max || snapshot) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2513 | iter->tr = &max_tr; | 
|  | 2514 | else | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2515 | iter->tr = &global_trace; | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 2516 | iter->snapshot = snapshot; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2517 | iter->pos = -1; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2518 | mutex_init(&iter->mutex); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2519 | iter->cpu_file = cpu_file; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2520 |  | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 2521 | /* Notify the tracer early, before we stop tracing. */ | 
|  | 2522 | if (iter->trace && iter->trace->open) | 
| Markus Metzger | a93751c | 2008-12-11 13:53:26 +0100 | [diff] [blame] | 2523 | iter->trace->open(iter); | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 2524 |  | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 2525 | /* Annotate start of buffers if we had overruns */ | 
|  | 2526 | if (ring_buffer_overruns(iter->tr->buffer)) | 
|  | 2527 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 
|  | 2528 |  | 
| David Sharp | 8be0709 | 2012-11-13 12:18:22 -0800 | [diff] [blame] | 2529 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ | 
|  | 2530 | if (trace_clocks[trace_clock_id].in_ns) | 
|  | 2531 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | 
|  | 2532 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 2533 | /* stop the trace while dumping if we are not opening "snapshot" */ | 
|  | 2534 | if (!iter->snapshot) | 
|  | 2535 | tracing_stop(); | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2536 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2537 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 
|  | 2538 | for_each_tracing_cpu(cpu) { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2539 | iter->buffer_iter[cpu] = | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 2540 | ring_buffer_read_prepare(iter->tr->buffer, cpu); | 
|  | 2541 | } | 
|  | 2542 | ring_buffer_read_prepare_sync(); | 
|  | 2543 | for_each_tracing_cpu(cpu) { | 
|  | 2544 | ring_buffer_read_start(iter->buffer_iter[cpu]); | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2545 | tracing_iter_reset(iter, cpu); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2546 | } | 
|  | 2547 | } else { | 
|  | 2548 | cpu = iter->cpu_file; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2549 | iter->buffer_iter[cpu] = | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 2550 | ring_buffer_read_prepare(iter->tr->buffer, cpu); | 
|  | 2551 | ring_buffer_read_prepare_sync(); | 
|  | 2552 | ring_buffer_read_start(iter->buffer_iter[cpu]); | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2553 | tracing_iter_reset(iter, cpu); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2554 | } | 
|  | 2555 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2556 | mutex_unlock(&trace_types_lock); | 
|  | 2557 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2558 | return iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2559 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2560 | fail: | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2561 | mutex_unlock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2562 | kfree(iter->trace); | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 2563 | kfree(iter->buffer_iter); | 
| Dan Carpenter | 93574fc | 2012-07-11 09:35:08 +0300 | [diff] [blame] | 2564 | release: | 
| Jiri Olsa | 50e18b9 | 2012-04-25 10:23:39 +0200 | [diff] [blame] | 2565 | seq_release_private(inode, file); | 
|  | 2566 | return ERR_PTR(-ENOMEM); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2567 | } | 
|  | 2568 |  | 
|  | 2569 | int tracing_open_generic(struct inode *inode, struct file *filp) | 
|  | 2570 | { | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2571 | if (tracing_disabled) | 
|  | 2572 | return -ENODEV; | 
|  | 2573 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2574 | filp->private_data = inode->i_private; | 
|  | 2575 | return 0; | 
|  | 2576 | } | 
|  | 2577 |  | 
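|  |  | /* | 
|  |  | * tracing_release - undo __tracing_open | 
|  |  | * | 
|  |  | * Finishes the per-cpu buffer iterators, calls the tracer's close | 
|  |  | * callback, restarts tracing if it was stopped on open, and frees | 
|  |  | * the iterator. | 
|  |  | */ | 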
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 2578 | static int tracing_release(struct inode *inode, struct file *file) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2579 | { | 
| matt mooney | 907f278 | 2010-09-27 19:04:53 -0700 | [diff] [blame] | 2580 | struct seq_file *m = file->private_data; | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2581 | struct trace_iterator *iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2582 | int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2583 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2584 | if (!(file->f_mode & FMODE_READ)) | 
|  | 2585 | return 0; | 
|  | 2586 |  | 
|  | 2587 | iter = m->private; | 
|  | 2588 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2589 | mutex_lock(&trace_types_lock); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2590 | for_each_tracing_cpu(cpu) { | 
|  | 2591 | if (iter->buffer_iter[cpu]) | 
|  | 2592 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 
|  | 2593 | } | 
|  | 2594 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2595 | if (iter->trace && iter->trace->close) | 
|  | 2596 | iter->trace->close(iter); | 
|  | 2597 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 2598 | if (!iter->snapshot) | 
|  | 2599 | /* reenable tracing if it was previously enabled */ | 
|  | 2600 | tracing_start(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2601 | mutex_unlock(&trace_types_lock); | 
|  | 2602 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2603 | mutex_destroy(&iter->mutex); | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 2604 | free_cpumask_var(iter->started); | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2605 | kfree(iter->trace); | 
| Steven Rostedt | 6d158a8 | 2012-06-27 20:46:14 -0400 | [diff] [blame] | 2606 | kfree(iter->buffer_iter); | 
| Jiri Olsa | 50e18b9 | 2012-04-25 10:23:39 +0200 | [diff] [blame] | 2607 | seq_release_private(inode, file); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2608 | return 0; | 
|  | 2609 | } | 
|  | 2610 |  | 
|  | 2611 | static int tracing_open(struct inode *inode, struct file *file) | 
|  | 2612 | { | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2613 | struct trace_iterator *iter; | 
|  | 2614 | int ret = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2615 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2616 | /* If this file was opened for write, then erase its contents */ | 
|  | 2617 | if ((file->f_mode & FMODE_WRITE) && | 
| Steven Rostedt | 8650ae3 | 2009-07-22 23:29:30 -0400 | [diff] [blame] | 2618 | (file->f_flags & O_TRUNC)) { | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2619 | long cpu = (long) inode->i_private; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2620 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2621 | if (cpu == TRACE_PIPE_ALL_CPU) | 
|  | 2622 | tracing_reset_online_cpus(&global_trace); | 
|  | 2623 | else | 
|  | 2624 | tracing_reset(&global_trace, cpu); | 
|  | 2625 | } | 
|  | 2626 |  | 
|  | 2627 | if (file->f_mode & FMODE_READ) { | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 2628 | iter = __tracing_open(inode, file, false); | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2629 | if (IS_ERR(iter)) | 
|  | 2630 | ret = PTR_ERR(iter); | 
|  | 2631 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) | 
|  | 2632 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | 
|  | 2633 | } | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2634 | return ret; | 
|  | 2635 | } | 
|  | 2636 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 2637 | static void * | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2638 | t_next(struct seq_file *m, void *v, loff_t *pos) | 
|  | 2639 | { | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2640 | struct tracer *t = v; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2641 |  | 
|  | 2642 | (*pos)++; | 
|  | 2643 |  | 
|  | 2644 | if (t) | 
|  | 2645 | t = t->next; | 
|  | 2646 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2647 | return t; | 
|  | 2648 | } | 
|  | 2649 |  | 
|  | 2650 | static void *t_start(struct seq_file *m, loff_t *pos) | 
|  | 2651 | { | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2652 | struct tracer *t; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2653 | loff_t l = 0; | 
|  | 2654 |  | 
|  | 2655 | mutex_lock(&trace_types_lock); | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2656 | for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2657 | ; | 
|  | 2658 |  | 
|  | 2659 | return t; | 
|  | 2660 | } | 
|  | 2661 |  | 
|  | 2662 | static void t_stop(struct seq_file *m, void *p) | 
|  | 2663 | { | 
|  | 2664 | mutex_unlock(&trace_types_lock); | 
|  | 2665 | } | 
|  | 2666 |  | 
|  | 2667 | static int t_show(struct seq_file *m, void *v) | 
|  | 2668 | { | 
|  | 2669 | struct tracer *t = v; | 
|  | 2670 |  | 
|  | 2671 | if (!t) | 
|  | 2672 | return 0; | 
|  | 2673 |  | 
|  | 2674 | seq_printf(m, "%s", t->name); | 
|  | 2675 | if (t->next) | 
|  | 2676 | seq_putc(m, ' '); | 
|  | 2677 | else | 
|  | 2678 | seq_putc(m, '\n'); | 
|  | 2679 |  | 
|  | 2680 | return 0; | 
|  | 2681 | } | 
|  | 2682 |  | 
| James Morris | 88e9d34 | 2009-09-22 16:43:43 -0700 | [diff] [blame] | 2683 | static const struct seq_operations show_traces_seq_ops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2684 | .start		= t_start, | 
|  | 2685 | .next		= t_next, | 
|  | 2686 | .stop		= t_stop, | 
|  | 2687 | .show		= t_show, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2688 | }; | 
|  | 2689 |  | 
|  | 2690 | static int show_traces_open(struct inode *inode, struct file *file) | 
|  | 2691 | { | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2692 | if (tracing_disabled) | 
|  | 2693 | return -ENODEV; | 
|  | 2694 |  | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2695 | return seq_open(file, &show_traces_seq_ops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2696 | } | 
|  | 2697 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2698 | static ssize_t | 
|  | 2699 | tracing_write_stub(struct file *filp, const char __user *ubuf, | 
|  | 2700 | size_t count, loff_t *ppos) | 
|  | 2701 | { | 
|  | 2702 | return count; | 
|  | 2703 | } | 
|  | 2704 |  | 
| Slava Pestov | 364829b | 2010-11-24 15:13:16 -0800 | [diff] [blame] | 2705 | static loff_t tracing_seek(struct file *file, loff_t offset, int origin) | 
|  | 2706 | { | 
|  | 2707 | if (file->f_mode & FMODE_READ) | 
|  | 2708 | return seq_lseek(file, offset, origin); | 
|  | 2709 | else | 
|  | 2710 | return 0; | 
|  | 2711 | } | 
|  | 2712 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2713 | static const struct file_operations tracing_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2714 | .open		= tracing_open, | 
|  | 2715 | .read		= seq_read, | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2716 | .write		= tracing_write_stub, | 
| Slava Pestov | 364829b | 2010-11-24 15:13:16 -0800 | [diff] [blame] | 2717 | .llseek		= tracing_seek, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2718 | .release	= tracing_release, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2719 | }; | 
|  | 2720 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2721 | static const struct file_operations show_traces_fops = { | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2722 | .open		= show_traces_open, | 
|  | 2723 | .read		= seq_read, | 
|  | 2724 | .release	= seq_release, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 2725 | .llseek		= seq_lseek, | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2726 | }; | 
|  | 2727 |  | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2728 | /* | 
|  | 2729 | * Only trace on a CPU if the bitmask is set: | 
|  | 2730 | */ | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2731 | static cpumask_var_t tracing_cpumask; | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2732 |  | 
|  | 2733 | /* | 
|  | 2734 | * The tracer itself will not take this lock, but we still want | 
|  | 2735 | * to provide a consistent cpumask to user-space: | 
|  | 2736 | */ | 
|  | 2737 | static DEFINE_MUTEX(tracing_cpumask_update_lock); | 
|  | 2738 |  | 
|  | 2739 | /* | 
|  | 2740 | * Temporary storage for the character representation of the | 
|  | 2741 | * CPU bitmask (and one more byte for the newline): | 
|  | 2742 | */ | 
|  | 2743 | static char mask_str[NR_CPUS + 1]; | 
|  | 2744 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2745 | static ssize_t | 
|  | 2746 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | 
|  | 2747 | size_t count, loff_t *ppos) | 
|  | 2748 | { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2749 | int len; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2750 |  | 
|  | 2751 | mutex_lock(&tracing_cpumask_update_lock); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2752 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2753 | len = cpumask_scnprintf(mask_str, count, tracing_cpumask); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2754 | if (count - len < 2) { | 
|  | 2755 | count = -EINVAL; | 
|  | 2756 | goto out_err; | 
|  | 2757 | } | 
|  | 2758 | len += sprintf(mask_str + len, "\n"); | 
|  | 2759 | count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); | 
|  | 2760 |  | 
|  | 2761 | out_err: | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2762 | mutex_unlock(&tracing_cpumask_update_lock); | 
|  | 2763 |  | 
|  | 2764 | return count; | 
|  | 2765 | } | 
|  | 2766 |  | 
|  | 2767 | static ssize_t | 
|  | 2768 | tracing_cpumask_write(struct file *filp, const char __user *ubuf, | 
|  | 2769 | size_t count, loff_t *ppos) | 
|  | 2770 | { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2771 | int err, cpu; | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2772 | cpumask_var_t tracing_cpumask_new; | 
|  | 2773 |  | 
|  | 2774 | if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) | 
|  | 2775 | return -ENOMEM; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2776 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2777 | err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2778 | if (err) | 
|  | 2779 | goto err_unlock; | 
|  | 2780 |  | 
| Li Zefan | 215368e | 2009-06-15 10:56:42 +0800 | [diff] [blame] | 2781 | mutex_lock(&tracing_cpumask_update_lock); | 
|  | 2782 |  | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 2783 | local_irq_disable(); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 2784 | arch_spin_lock(&ftrace_max_lock); | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2785 | for_each_tracing_cpu(cpu) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2786 | /* | 
|  | 2787 | * Increase/decrease the disabled counter if we are | 
|  | 2788 | * about to flip a bit in the cpumask: | 
|  | 2789 | */ | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2790 | if (cpumask_test_cpu(cpu, tracing_cpumask) && | 
|  | 2791 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2792 | atomic_inc(&global_trace.data[cpu]->disabled); | 
| Vaibhav Nagarnaik | 71babb2 | 2012-05-03 18:59:52 -0700 | [diff] [blame] | 2793 | ring_buffer_record_disable_cpu(global_trace.buffer, cpu); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2794 | } | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2795 | if (!cpumask_test_cpu(cpu, tracing_cpumask) && | 
|  | 2796 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2797 | atomic_dec(&global_trace.data[cpu]->disabled); | 
| Vaibhav Nagarnaik | 71babb2 | 2012-05-03 18:59:52 -0700 | [diff] [blame] | 2798 | ring_buffer_record_enable_cpu(global_trace.buffer, cpu); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2799 | } | 
|  | 2800 | } | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 2801 | arch_spin_unlock(&ftrace_max_lock); | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 2802 | local_irq_enable(); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2803 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2804 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2805 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2806 | mutex_unlock(&tracing_cpumask_update_lock); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2807 | free_cpumask_var(tracing_cpumask_new); | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2808 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2809 | return count; | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2810 |  | 
|  | 2811 | err_unlock: | 
| Li Zefan | 215368e | 2009-06-15 10:56:42 +0800 | [diff] [blame] | 2812 | free_cpumask_var(tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2813 |  | 
|  | 2814 | return err; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2815 | } | 
|  | 2816 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2817 | static const struct file_operations tracing_cpumask_fops = { | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2818 | .open		= tracing_open_generic, | 
|  | 2819 | .read		= tracing_cpumask_read, | 
|  | 2820 | .write		= tracing_cpumask_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 2821 | .llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2822 | }; | 
|  | 2823 |  | 
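|  |  | /* | 
|  |  | * Illustrative use of the tracing_cpumask interface above (a sketch; | 
|  |  | * the debugfs path matches the mini-HOWTO further down, and the mask | 
|  |  | * width depends on the number of possible CPUs): | 
|  |  | * | 
|  |  | *   # cat /sys/kernel/debug/tracing/tracing_cpumask | 
|  |  | *   f | 
|  |  | *   # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask | 
|  |  | * | 
|  |  | * The mask is hexadecimal; writing 3 on a 4-CPU box would restrict | 
|  |  | * tracing to CPUs 0 and 1 via the disabled counters and the | 
|  |  | * ring_buffer_record_{disable,enable}_cpu() calls above. | 
|  |  | */ | 
|  |  |  | 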
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2824 | static int tracing_trace_options_show(struct seq_file *m, void *v) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2825 | { | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2826 | struct tracer_opt *trace_opts; | 
|  | 2827 | u32 tracer_flags; | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2828 | int i; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2829 |  | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2830 | mutex_lock(&trace_types_lock); | 
|  | 2831 | tracer_flags = current_trace->flags->val; | 
|  | 2832 | trace_opts = current_trace->flags->opts; | 
|  | 2833 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2834 | for (i = 0; trace_options[i]; i++) { | 
|  | 2835 | if (trace_flags & (1 << i)) | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2836 | seq_printf(m, "%s\n", trace_options[i]); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2837 | else | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2838 | seq_printf(m, "no%s\n", trace_options[i]); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2839 | } | 
|  | 2840 |  | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2841 | for (i = 0; trace_opts[i].name; i++) { | 
|  | 2842 | if (tracer_flags & trace_opts[i].bit) | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2843 | seq_printf(m, "%s\n", trace_opts[i].name); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2844 | else | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2845 | seq_printf(m, "no%s\n", trace_opts[i].name); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2846 | } | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2847 | mutex_unlock(&trace_types_lock); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2848 |  | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2849 | return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2850 | } | 
|  | 2851 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2852 | static int __set_tracer_option(struct tracer *trace, | 
|  | 2853 | struct tracer_flags *tracer_flags, | 
|  | 2854 | struct tracer_opt *opts, int neg) | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2855 | { | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2856 | int ret; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2857 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2858 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2859 | if (ret) | 
|  | 2860 | return ret; | 
|  | 2861 |  | 
|  | 2862 | if (neg) | 
| Zhaolei | 7770841 | 2009-08-07 18:53:21 +0800 | [diff] [blame] | 2863 | tracer_flags->val &= ~opts->bit; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2864 | else | 
| Zhaolei | 7770841 | 2009-08-07 18:53:21 +0800 | [diff] [blame] | 2865 | tracer_flags->val |= opts->bit; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2866 | return 0; | 
|  | 2867 | } | 
|  | 2868 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2869 | /* Try to assign a tracer specific option */ | 
|  | 2870 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 
|  | 2871 | { | 
|  | 2872 | struct tracer_flags *tracer_flags = trace->flags; | 
|  | 2873 | struct tracer_opt *opts = NULL; | 
|  | 2874 | int i; | 
|  | 2875 |  | 
|  | 2876 | for (i = 0; tracer_flags->opts[i].name; i++) { | 
|  | 2877 | opts = &tracer_flags->opts[i]; | 
|  | 2878 |  | 
|  | 2879 | if (strcmp(cmp, opts->name) == 0) | 
|  | 2880 | return __set_tracer_option(trace, trace->flags, | 
|  | 2881 | opts, neg); | 
|  | 2882 | } | 
|  | 2883 |  | 
|  | 2884 | return -EINVAL; | 
|  | 2885 | } | 
|  | 2886 |  | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 2887 | /* Some tracers require overwrite to stay enabled */ | 
|  | 2888 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) | 
|  | 2889 | { | 
|  | 2890 | if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) | 
|  | 2891 | return -1; | 
|  | 2892 |  | 
|  | 2893 | return 0; | 
|  | 2894 | } | 
|  | 2895 |  | 
|  | 2896 | int set_tracer_flag(unsigned int mask, int enabled) | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2897 | { | 
|  | 2898 | /* do nothing if flag is already set */ | 
|  | 2899 | if (!!(trace_flags & mask) == !!enabled) | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 2900 | return 0; | 
|  | 2901 |  | 
|  | 2902 | /* Give the tracer a chance to approve the change */ | 
|  | 2903 | if (current_trace->flag_changed) | 
|  | 2904 | if (current_trace->flag_changed(current_trace, mask, !!enabled)) | 
|  | 2905 | return -EINVAL; | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2906 |  | 
|  | 2907 | if (enabled) | 
|  | 2908 | trace_flags |= mask; | 
|  | 2909 | else | 
|  | 2910 | trace_flags &= ~mask; | 
| Li Zefan | e870e9a | 2010-07-02 11:07:32 +0800 | [diff] [blame] | 2911 |  | 
|  | 2912 | if (mask == TRACE_ITER_RECORD_CMD) | 
|  | 2913 | trace_event_enable_cmd_record(enabled); | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 2914 |  | 
| Steven Rostedt (Red Hat) | 8090282 | 2013-03-14 14:20:54 -0400 | [diff] [blame] | 2915 | if (mask == TRACE_ITER_OVERWRITE) { | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 2916 | ring_buffer_change_overwrite(global_trace.buffer, enabled); | 
| Steven Rostedt (Red Hat) | 8090282 | 2013-03-14 14:20:54 -0400 | [diff] [blame] | 2917 | #ifdef CONFIG_TRACER_MAX_TRACE | 
|  | 2918 | ring_buffer_change_overwrite(max_tr.buffer, enabled); | 
|  | 2919 | #endif | 
|  | 2920 | } | 
| Steven Rostedt | 8169883 | 2012-10-11 10:15:05 -0400 | [diff] [blame] | 2921 |  | 
|  | 2922 | if (mask == TRACE_ITER_PRINTK) | 
|  | 2923 | trace_printk_start_stop_comm(enabled); | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 2924 |  | 
|  | 2925 | return 0; | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2926 | } | 
|  | 2927 |  | 
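|  |  | /* | 
|  |  | * A minimal sketch of how a tracer can use trace_keep_overwrite() and | 
|  |  | * the flag_changed hook checked in set_tracer_flag() above.  The | 
|  |  | * "example" tracer below is hypothetical and only for illustration: | 
|  |  | * | 
|  |  | *	static struct tracer example_tracer = { | 
|  |  | *		.name		= "example", | 
|  |  | *		.flag_changed	= trace_keep_overwrite, | 
|  |  | *	}; | 
|  |  | * | 
|  |  | * With that in place, writing "nooverwrite" to trace_options while the | 
|  |  | * example tracer is active makes set_tracer_flag() return -EINVAL | 
|  |  | * instead of clearing TRACE_ITER_OVERWRITE. | 
|  |  | */ | 
|  |  |  | 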
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 2928 | static int trace_set_options(char *option) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2929 | { | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2930 | char *cmp; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2931 | int neg = 0; | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 2932 | int ret = -ENODEV; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2933 | int i; | 
|  | 2934 |  | 
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 2935 | cmp = strstrip(option); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2936 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2937 | if (strncmp(cmp, "no", 2) == 0) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2938 | neg = 1; | 
|  | 2939 | cmp += 2; | 
|  | 2940 | } | 
|  | 2941 |  | 
| Steven Rostedt (Red Hat) | 69d34da | 2013-03-14 13:50:56 -0400 | [diff] [blame] | 2942 | mutex_lock(&trace_types_lock); | 
|  | 2943 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2944 | for (i = 0; trace_options[i]; i++) { | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2945 | if (strcmp(cmp, trace_options[i]) == 0) { | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 2946 | ret = set_tracer_flag(1 << i, !neg); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2947 | break; | 
|  | 2948 | } | 
|  | 2949 | } | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2950 |  | 
|  | 2951 | /* If no option could be set, test the specific tracer options */ | 
| Steven Rostedt (Red Hat) | 69d34da | 2013-03-14 13:50:56 -0400 | [diff] [blame] | 2952 | if (!trace_options[i]) | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2953 | ret = set_tracer_option(current_trace, cmp, neg); | 
| Steven Rostedt (Red Hat) | 69d34da | 2013-03-14 13:50:56 -0400 | [diff] [blame] | 2954 |  | 
|  | 2955 | mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2956 |  | 
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 2957 | return ret; | 
|  | 2958 | } | 
|  | 2959 |  | 
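|  |  | /* | 
|  |  | * Sketch of the two option namespaces handled above (the second name | 
|  |  | * is an example and only exists for tracers that define it): | 
|  |  | * | 
|  |  | *   # echo noprint-parent > /sys/kernel/debug/tracing/trace_options | 
|  |  | *   # echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options | 
|  |  | * | 
|  |  | * The first write matches an entry in trace_options[] and goes through | 
|  |  | * set_tracer_flag(); the second falls through to set_tracer_option() | 
|  |  | * and is resolved against the current tracer's own flags->opts[] table. | 
|  |  | */ | 
|  |  |  | 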
|  | 2960 | static ssize_t | 
|  | 2961 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, | 
|  | 2962 | size_t cnt, loff_t *ppos) | 
|  | 2963 | { | 
|  | 2964 | char buf[64]; | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 2965 | int ret; | 
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 2966 |  | 
|  | 2967 | if (cnt >= sizeof(buf)) | 
|  | 2968 | return -EINVAL; | 
|  | 2969 |  | 
|  | 2970 | if (copy_from_user(&buf, ubuf, cnt)) | 
|  | 2971 | return -EFAULT; | 
|  | 2972 |  | 
| Steven Rostedt | a8dd217 | 2013-01-09 20:54:17 -0500 | [diff] [blame] | 2973 | buf[cnt] = 0; | 
|  | 2974 |  | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 2975 | ret = trace_set_options(buf); | 
|  | 2976 | if (ret < 0) | 
|  | 2977 | return ret; | 
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 2978 |  | 
| Jiri Olsa | cf8517c | 2009-10-23 19:36:16 -0400 | [diff] [blame] | 2979 | *ppos += cnt; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2980 |  | 
|  | 2981 | return cnt; | 
|  | 2982 | } | 
|  | 2983 |  | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2984 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | 
|  | 2985 | { | 
|  | 2986 | if (tracing_disabled) | 
|  | 2987 | return -ENODEV; | 
|  | 2988 | return single_open(file, tracing_trace_options_show, NULL); | 
|  | 2989 | } | 
|  | 2990 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2991 | static const struct file_operations tracing_iter_fops = { | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2992 | .open		= tracing_trace_options_open, | 
|  | 2993 | .read		= seq_read, | 
|  | 2994 | .llseek		= seq_lseek, | 
|  | 2995 | .release	= single_release, | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 2996 | .write		= tracing_trace_options_write, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2997 | }; | 
|  | 2998 |  | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2999 | static const char readme_msg[] = | 
|  | 3000 | "tracing mini-HOWTO:\n\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 3001 | "# mount -t debugfs nodev /sys/kernel/debug\n\n" | 
|  | 3002 | "# cat /sys/kernel/debug/tracing/available_tracers\n" | 
| Geunsik Lim | 1e42e83 | 2012-02-08 19:05:36 +0900 | [diff] [blame] | 3003 | "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 3004 | "# cat /sys/kernel/debug/tracing/current_tracer\n" | 
| Nikanth Karthikesan | bc2b6871 | 2009-03-23 11:58:31 +0530 | [diff] [blame] | 3005 | "nop\n" | 
| Geunsik Lim | 1e42e83 | 2012-02-08 19:05:36 +0900 | [diff] [blame] | 3006 | "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 3007 | "# cat /sys/kernel/debug/tracing/current_tracer\n" | 
| Geunsik Lim | 1e42e83 | 2012-02-08 19:05:36 +0900 | [diff] [blame] | 3008 | "wakeup\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 3009 | "# cat /sys/kernel/debug/tracing/trace_options\n" | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 3010 | "noprint-parent nosym-offset nosym-addr noverbose\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 3011 | "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" | 
| Geunsik Lim | 9b5f8b3 | 2011-08-12 14:30:22 +0900 | [diff] [blame] | 3012 | "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 3013 | "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n" | 
| Geunsik Lim | 9b5f8b3 | 2011-08-12 14:30:22 +0900 | [diff] [blame] | 3014 | "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n" | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 3015 | ; | 
|  | 3016 |  | 
|  | 3017 | static ssize_t | 
|  | 3018 | tracing_readme_read(struct file *filp, char __user *ubuf, | 
|  | 3019 | size_t cnt, loff_t *ppos) | 
|  | 3020 | { | 
|  | 3021 | return simple_read_from_buffer(ubuf, cnt, ppos, | 
|  | 3022 | readme_msg, strlen(readme_msg)); | 
|  | 3023 | } | 
|  | 3024 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3025 | static const struct file_operations tracing_readme_fops = { | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 3026 | .open		= tracing_open_generic, | 
|  | 3027 | .read		= tracing_readme_read, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3028 | .llseek		= generic_file_llseek, | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 3029 | }; | 
|  | 3030 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3031 | static ssize_t | 
| Avadh Patel | 69abe6a | 2009-04-10 16:04:48 -0400 | [diff] [blame] | 3032 | tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, | 
|  | 3033 | size_t cnt, loff_t *ppos) | 
|  | 3034 | { | 
|  | 3035 | char *buf_comm; | 
|  | 3036 | char *file_buf; | 
|  | 3037 | char *buf; | 
|  | 3038 | int len = 0; | 
|  | 3039 | int pid; | 
|  | 3040 | int i; | 
|  | 3041 |  | 
|  | 3042 | file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); | 
|  | 3043 | if (!file_buf) | 
|  | 3044 | return -ENOMEM; | 
|  | 3045 |  | 
|  | 3046 | buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); | 
|  | 3047 | if (!buf_comm) { | 
|  | 3048 | kfree(file_buf); | 
|  | 3049 | return -ENOMEM; | 
|  | 3050 | } | 
|  | 3051 |  | 
|  | 3052 | buf = file_buf; | 
|  | 3053 |  | 
|  | 3054 | for (i = 0; i < SAVED_CMDLINES; i++) { | 
|  | 3055 | int r; | 
|  | 3056 |  | 
|  | 3057 | pid = map_cmdline_to_pid[i]; | 
|  | 3058 | if (pid == -1 || pid == NO_CMDLINE_MAP) | 
|  | 3059 | continue; | 
|  | 3060 |  | 
|  | 3061 | trace_find_cmdline(pid, buf_comm); | 
|  | 3062 | r = sprintf(buf, "%d %s\n", pid, buf_comm); | 
|  | 3063 | buf += r; | 
|  | 3064 | len += r; | 
|  | 3065 | } | 
|  | 3066 |  | 
|  | 3067 | len = simple_read_from_buffer(ubuf, cnt, ppos, | 
|  | 3068 | file_buf, len); | 
|  | 3069 |  | 
|  | 3070 | kfree(file_buf); | 
|  | 3071 | kfree(buf_comm); | 
|  | 3072 |  | 
|  | 3073 | return len; | 
|  | 3074 | } | 
|  | 3075 |  | 
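|  |  | /* | 
|  |  | * The file produced above is one "<pid> <comm>" pair per line, for | 
|  |  | * example (output is illustrative and depends on what has run since | 
|  |  | * the cmdline cache was last populated): | 
|  |  | * | 
|  |  | *   # cat /sys/kernel/debug/tracing/saved_cmdlines | 
|  |  | *   1 init | 
|  |  | *   1234 bash | 
|  |  | */ | 
|  |  |  | 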
|  | 3076 | static const struct file_operations tracing_saved_cmdlines_fops = { | 
|  | 3077 | .open       = tracing_open_generic, | 
|  | 3078 | .read       = tracing_saved_cmdlines_read, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3079 | .llseek	= generic_file_llseek, | 
| Avadh Patel | 69abe6a | 2009-04-10 16:04:48 -0400 | [diff] [blame] | 3080 | }; | 
|  | 3081 |  | 
|  | 3082 | static ssize_t | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3083 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 
|  | 3084 | size_t cnt, loff_t *ppos) | 
|  | 3085 | { | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 3086 | char buf[MAX_TRACER_SIZE+2]; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3087 | int r; | 
|  | 3088 |  | 
|  | 3089 | mutex_lock(&trace_types_lock); | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 3090 | r = sprintf(buf, "%s\n", current_trace->name); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3091 | mutex_unlock(&trace_types_lock); | 
|  | 3092 |  | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3093 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3094 | } | 
|  | 3095 |  | 
| Arnaldo Carvalho de Melo | b6f11df | 2009-02-05 18:02:00 -0200 | [diff] [blame] | 3096 | int tracer_init(struct tracer *t, struct trace_array *tr) | 
|  | 3097 | { | 
|  | 3098 | tracing_reset_online_cpus(tr); | 
|  | 3099 | return t->init(tr); | 
|  | 3100 | } | 
|  | 3101 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3102 | static void set_buffer_entries(struct trace_array *tr, unsigned long val) | 
|  | 3103 | { | 
|  | 3104 | int cpu; | 
|  | 3105 | for_each_tracing_cpu(cpu) | 
|  | 3106 | tr->data[cpu]->entries = val; | 
|  | 3107 | } | 
|  | 3108 |  | 
| Hiraku Toyooka | d60da50 | 2012-10-17 11:56:16 +0900 | [diff] [blame] | 3109 | /* resize @tr's buffer to the size of @size_tr's entries */ | 
|  | 3110 | static int resize_buffer_duplicate_size(struct trace_array *tr, | 
|  | 3111 | struct trace_array *size_tr, int cpu_id) | 
|  | 3112 | { | 
|  | 3113 | int cpu, ret = 0; | 
|  | 3114 |  | 
|  | 3115 | if (cpu_id == RING_BUFFER_ALL_CPUS) { | 
|  | 3116 | for_each_tracing_cpu(cpu) { | 
|  | 3117 | ret = ring_buffer_resize(tr->buffer, | 
|  | 3118 | size_tr->data[cpu]->entries, cpu); | 
|  | 3119 | if (ret < 0) | 
|  | 3120 | break; | 
|  | 3121 | tr->data[cpu]->entries = size_tr->data[cpu]->entries; | 
|  | 3122 | } | 
|  | 3123 | } else { | 
|  | 3124 | ret = ring_buffer_resize(tr->buffer, | 
|  | 3125 | size_tr->data[cpu_id]->entries, cpu_id); | 
|  | 3126 | if (ret == 0) | 
|  | 3127 | tr->data[cpu_id]->entries = | 
|  | 3128 | size_tr->data[cpu_id]->entries; | 
|  | 3129 | } | 
|  | 3130 |  | 
|  | 3131 | return ret; | 
|  | 3132 | } | 
|  | 3133 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3134 | static int __tracing_resize_ring_buffer(unsigned long size, int cpu) | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3135 | { | 
|  | 3136 | int ret; | 
|  | 3137 |  | 
|  | 3138 | /* | 
|  | 3139 | * If the kernel or user changes the size of the ring buffer, | 
| Steven Rostedt | a123c52 | 2009-03-12 11:21:08 -0400 | [diff] [blame] | 3140 | * we use the size that was given, and we can forget about | 
|  | 3141 | * expanding it later. | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3142 | */ | 
|  | 3143 | ring_buffer_expanded = 1; | 
|  | 3144 |  | 
| Steven Rostedt | b382ede6 | 2012-10-10 21:44:34 -0400 | [diff] [blame] | 3145 | /* May be called before buffers are initialized */ | 
|  | 3146 | if (!global_trace.buffer) | 
|  | 3147 | return 0; | 
|  | 3148 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3149 | ret = ring_buffer_resize(global_trace.buffer, size, cpu); | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3150 | if (ret < 0) | 
|  | 3151 | return ret; | 
|  | 3152 |  | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3153 | if (!current_trace->use_max_tr) | 
|  | 3154 | goto out; | 
|  | 3155 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3156 | ret = ring_buffer_resize(max_tr.buffer, size, cpu); | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3157 | if (ret < 0) { | 
| Hiraku Toyooka | d60da50 | 2012-10-17 11:56:16 +0900 | [diff] [blame] | 3158 | int r = resize_buffer_duplicate_size(&global_trace, | 
|  | 3159 | &global_trace, cpu); | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3160 | if (r < 0) { | 
| Steven Rostedt | a123c52 | 2009-03-12 11:21:08 -0400 | [diff] [blame] | 3161 | /* | 
|  | 3162 | * AARGH! We are left with a different | 
|  | 3163 | * sized max buffer!!!! | 
|  | 3164 | * The max buffer is our "snapshot" buffer. | 
|  | 3165 | * When a tracer needs a snapshot (one of the | 
|  | 3166 | * latency tracers), it swaps the max buffer | 
|  | 3167 | * with the saved snapshot. We succeeded in | 
|  | 3168 | * updating the size of the main buffer, but failed to | 
|  | 3169 | * update the size of the max buffer. But when we tried | 
|  | 3170 | * to reset the main buffer to the original size, we | 
|  | 3171 | * failed there too. This is very unlikely to | 
|  | 3172 | * happen, but if it does, warn and kill all | 
|  | 3173 | * tracing. | 
|  | 3174 | */ | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3175 | WARN_ON(1); | 
|  | 3176 | tracing_disabled = 1; | 
|  | 3177 | } | 
|  | 3178 | return ret; | 
|  | 3179 | } | 
|  | 3180 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3181 | if (cpu == RING_BUFFER_ALL_CPUS) | 
|  | 3182 | set_buffer_entries(&max_tr, size); | 
|  | 3183 | else | 
|  | 3184 | max_tr.data[cpu]->entries = size; | 
|  | 3185 |  | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3186 | out: | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3187 | if (cpu == RING_BUFFER_ALL_CPUS) | 
|  | 3188 | set_buffer_entries(&global_trace, size); | 
|  | 3189 | else | 
|  | 3190 | global_trace.data[cpu]->entries = size; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3191 |  | 
|  | 3192 | return ret; | 
|  | 3193 | } | 
|  | 3194 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3195 | static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id) | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3196 | { | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 3197 | int ret = size; | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3198 |  | 
|  | 3199 | mutex_lock(&trace_types_lock); | 
|  | 3200 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3201 | if (cpu_id != RING_BUFFER_ALL_CPUS) { | 
|  | 3202 | /* make sure this cpu is enabled in the mask */ | 
|  | 3203 | if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { | 
|  | 3204 | ret = -EINVAL; | 
|  | 3205 | goto out; | 
|  | 3206 | } | 
|  | 3207 | } | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3208 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3209 | ret = __tracing_resize_ring_buffer(size, cpu_id); | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3210 | if (ret < 0) | 
|  | 3211 | ret = -ENOMEM; | 
|  | 3212 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3213 | out: | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3214 | mutex_unlock(&trace_types_lock); | 
|  | 3215 |  | 
|  | 3216 | return ret; | 
|  | 3217 | } | 
|  | 3218 |  | 
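|  |  | /* | 
|  |  | * These resize helpers sit behind the buffer_size_kb files in debugfs. | 
|  |  | * A sketch of typical usage (the top-level file maps to | 
|  |  | * RING_BUFFER_ALL_CPUS, the per-cpu variants to a single cpu): | 
|  |  | * | 
|  |  | *   # echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb | 
|  |  | */ | 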
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3219 |  | 
| Steven Rostedt | 1852fcc | 2009-03-11 14:33:00 -0400 | [diff] [blame] | 3220 | /** | 
|  | 3221 | * tracing_update_buffers - used by tracing facility to expand ring buffers | 
|  | 3222 | * | 
|  | 3223 | * To save memory when tracing is never used on a system that has it | 
|  | 3224 | * configured in, the ring buffers are set to a minimum size. But once | 
|  | 3225 | * a user starts to use the tracing facility, they need to grow | 
|  | 3226 | * to their default size. | 
|  | 3227 | * | 
|  | 3228 | * This function is to be called when a tracer is about to be used. | 
|  | 3229 | */ | 
|  | 3230 | int tracing_update_buffers(void) | 
|  | 3231 | { | 
|  | 3232 | int ret = 0; | 
|  | 3233 |  | 
| Steven Rostedt | 1027fcb | 2009-03-12 11:33:20 -0400 | [diff] [blame] | 3234 | mutex_lock(&trace_types_lock); | 
| Steven Rostedt | 1852fcc | 2009-03-11 14:33:00 -0400 | [diff] [blame] | 3235 | if (!ring_buffer_expanded) | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3236 | ret = __tracing_resize_ring_buffer(trace_buf_size, | 
|  | 3237 | RING_BUFFER_ALL_CPUS); | 
| Steven Rostedt | 1027fcb | 2009-03-12 11:33:20 -0400 | [diff] [blame] | 3238 | mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | 1852fcc | 2009-03-11 14:33:00 -0400 | [diff] [blame] | 3239 |  | 
|  | 3240 | return ret; | 
|  | 3241 | } | 
|  | 3242 |  | 
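|  |  | /* | 
|  |  | * A minimal sketch of the expected call pattern for | 
|  |  | * tracing_update_buffers(): callers that are about to start writing | 
|  |  | * events expand the buffers first and bail out if that fails: | 
|  |  | * | 
|  |  | *	ret = tracing_update_buffers(); | 
|  |  | *	if (ret < 0) | 
|  |  | *		return ret; | 
|  |  | */ | 
|  |  |  | 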
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3243 | struct trace_option_dentry; | 
|  | 3244 |  | 
|  | 3245 | static struct trace_option_dentry * | 
|  | 3246 | create_trace_option_files(struct tracer *tracer); | 
|  | 3247 |  | 
|  | 3248 | static void | 
|  | 3249 | destroy_trace_option_files(struct trace_option_dentry *topts); | 
|  | 3250 |  | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 3251 | static int tracing_set_tracer(const char *buf) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3252 | { | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3253 | static struct trace_option_dentry *topts; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3254 | struct trace_array *tr = &global_trace; | 
|  | 3255 | struct tracer *t; | 
| Steven Rostedt | 34600f0 | 2013-01-22 13:35:11 -0500 | [diff] [blame] | 3256 | bool had_max_tr; | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 3257 | int ret = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3258 |  | 
| Steven Rostedt | 1027fcb | 2009-03-12 11:33:20 -0400 | [diff] [blame] | 3259 | mutex_lock(&trace_types_lock); | 
|  | 3260 |  | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3261 | if (!ring_buffer_expanded) { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3262 | ret = __tracing_resize_ring_buffer(trace_buf_size, | 
|  | 3263 | RING_BUFFER_ALL_CPUS); | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3264 | if (ret < 0) | 
| Frederic Weisbecker | 59f586d | 2009-03-15 22:10:39 +0100 | [diff] [blame] | 3265 | goto out; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3266 | ret = 0; | 
|  | 3267 | } | 
|  | 3268 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3269 | for (t = trace_types; t; t = t->next) { | 
|  | 3270 | if (strcmp(t->name, buf) == 0) | 
|  | 3271 | break; | 
|  | 3272 | } | 
| Frederic Weisbecker | c2931e0 | 2008-10-04 22:04:44 +0200 | [diff] [blame] | 3273 | if (!t) { | 
|  | 3274 | ret = -EINVAL; | 
|  | 3275 | goto out; | 
|  | 3276 | } | 
|  | 3277 | if (t == current_trace) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3278 | goto out; | 
|  | 3279 |  | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 3280 | trace_branch_disable(); | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 3281 |  | 
|  | 3282 | current_trace->enabled = false; | 
|  | 3283 |  | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 3284 | if (current_trace->reset) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3285 | current_trace->reset(tr); | 
| Steven Rostedt | 34600f0 | 2013-01-22 13:35:11 -0500 | [diff] [blame] | 3286 |  | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 3287 | had_max_tr = current_trace->allocated_snapshot; | 
| Steven Rostedt | 34600f0 | 2013-01-22 13:35:11 -0500 | [diff] [blame] | 3288 | current_trace = &nop_trace; | 
|  | 3289 |  | 
|  | 3290 | if (had_max_tr && !t->use_max_tr) { | 
|  | 3291 | /* | 
|  | 3292 | * We need to make sure that the update_max_tr sees that | 
|  | 3293 | * current_trace changed to nop_trace to keep it from | 
|  | 3294 | * swapping the buffers after we resize it. | 
|  | 3295 | * The update_max_tr is called with interrupts disabled, | 
|  | 3296 | * so a synchronize_sched() is sufficient. | 
|  | 3297 | */ | 
|  | 3298 | synchronize_sched(); | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3299 | /* | 
|  | 3300 | * We don't free the ring buffer; instead, we resize it because | 
|  | 3301 | * the max_tr ring buffer has some state (e.g. ring->clock) and | 
|  | 3302 | * we want to preserve it. | 
|  | 3303 | */ | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3304 | ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); | 
|  | 3305 | set_buffer_entries(&max_tr, 1); | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 3306 | tracing_reset_online_cpus(&max_tr); | 
|  | 3307 | current_trace->allocated_snapshot = false; | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3308 | } | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3309 | destroy_trace_option_files(topts); | 
|  | 3310 |  | 
| Anton Vorontsov | b2ad368 | 2012-07-09 17:10:39 -0700 | [diff] [blame] | 3311 | topts = create_trace_option_files(t); | 
| Steven Rostedt | 34600f0 | 2013-01-22 13:35:11 -0500 | [diff] [blame] | 3312 | if (t->use_max_tr && !had_max_tr) { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3313 | /* we need to make per-cpu buffer sizes equivalent */ | 
| Hiraku Toyooka | d60da50 | 2012-10-17 11:56:16 +0900 | [diff] [blame] | 3314 | ret = resize_buffer_duplicate_size(&max_tr, &global_trace, | 
|  | 3315 | RING_BUFFER_ALL_CPUS); | 
|  | 3316 | if (ret < 0) | 
|  | 3317 | goto out; | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 3318 | t->allocated_snapshot = true; | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3319 | } | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3320 |  | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 3321 | if (t->init) { | 
| Arnaldo Carvalho de Melo | b6f11df | 2009-02-05 18:02:00 -0200 | [diff] [blame] | 3322 | ret = tracer_init(t, tr); | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 3323 | if (ret) | 
|  | 3324 | goto out; | 
|  | 3325 | } | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3326 |  | 
| Anton Vorontsov | b2ad368 | 2012-07-09 17:10:39 -0700 | [diff] [blame] | 3327 | current_trace = t; | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 3328 | current_trace->enabled = true; | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 3329 | trace_branch_enable(tr); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3330 | out: | 
|  | 3331 | mutex_unlock(&trace_types_lock); | 
|  | 3332 |  | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 3333 | return ret; | 
|  | 3334 | } | 
|  | 3335 |  | 
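|  |  | /* | 
|  |  | * tracing_set_tracer() is what the write handler below ends up calling | 
|  |  | * when a name is written to current_tracer, e.g. (available names | 
|  |  | * depend on the kernel configuration, see available_tracers): | 
|  |  | * | 
|  |  | *   # echo function > /sys/kernel/debug/tracing/current_tracer | 
|  |  | */ | 
|  |  |  | 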
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3336 | static ssize_t | 
|  | 3337 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 
|  | 3338 | size_t cnt, loff_t *ppos) | 
|  | 3339 | { | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 3340 | char buf[MAX_TRACER_SIZE+1]; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3341 | int i; | 
|  | 3342 | size_t ret; | 
| Frederic Weisbecker | e6e7a65 | 2008-11-16 05:53:19 +0100 | [diff] [blame] | 3343 | int err; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3344 |  | 
| Steven Rostedt | 60063a6 | 2008-10-28 10:44:24 -0400 | [diff] [blame] | 3345 | ret = cnt; | 
|  | 3346 |  | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 3347 | if (cnt > MAX_TRACER_SIZE) | 
|  | 3348 | cnt = MAX_TRACER_SIZE; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3349 |  | 
|  | 3350 | if (copy_from_user(&buf, ubuf, cnt)) | 
|  | 3351 | return -EFAULT; | 
|  | 3352 |  | 
|  | 3353 | buf[cnt] = 0; | 
|  | 3354 |  | 
|  | 3355 | /* strip trailing whitespace. */ | 
|  | 3356 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 
|  | 3357 | buf[i] = 0; | 
|  | 3358 |  | 
| Frederic Weisbecker | e6e7a65 | 2008-11-16 05:53:19 +0100 | [diff] [blame] | 3359 | err = tracing_set_tracer(buf); | 
|  | 3360 | if (err) | 
|  | 3361 | return err; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3362 |  | 
| Jiri Olsa | cf8517c | 2009-10-23 19:36:16 -0400 | [diff] [blame] | 3363 | *ppos += ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3364 |  | 
| Frederic Weisbecker | c2931e0 | 2008-10-04 22:04:44 +0200 | [diff] [blame] | 3365 | return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3366 | } | 
|  | 3367 |  | 
|  | 3368 | static ssize_t | 
|  | 3369 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | 
|  | 3370 | size_t cnt, loff_t *ppos) | 
|  | 3371 | { | 
|  | 3372 | unsigned long *ptr = filp->private_data; | 
|  | 3373 | char buf[64]; | 
|  | 3374 | int r; | 
|  | 3375 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3376 | r = snprintf(buf, sizeof(buf), "%ld\n", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3377 | *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3378 | if (r > sizeof(buf)) | 
|  | 3379 | r = sizeof(buf); | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3380 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3381 | } | 
|  | 3382 |  | 
|  | 3383 | static ssize_t | 
|  | 3384 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 
|  | 3385 | size_t cnt, loff_t *ppos) | 
|  | 3386 | { | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 3387 | unsigned long *ptr = filp->private_data; | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 3388 | unsigned long val; | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3389 | int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3390 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 3391 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
|  | 3392 | if (ret) | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3393 | return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3394 |  | 
|  | 3395 | *ptr = val * 1000; | 
|  | 3396 |  | 
|  | 3397 | return cnt; | 
|  | 3398 | } | 
|  | 3399 |  | 
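|  |  | /* | 
|  |  | * The pair of handlers above exchanges values in microseconds with | 
|  |  | * user space (hence nsecs_to_usecs() on read and "* 1000" on write) | 
|  |  | * while the stored latency is in nanoseconds.  For example, to reset | 
|  |  | * the recorded maximum before a new measurement (path as in the | 
|  |  | * mini-HOWTO above): | 
|  |  | * | 
|  |  | *   # echo 0 > /sys/kernel/debug/tracing/tracing_max_latency | 
|  |  | */ | 
|  |  |  | 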
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3400 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 
|  | 3401 | { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3402 | long cpu_file = (long) inode->i_private; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3403 | struct trace_iterator *iter; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3404 | int ret = 0; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3405 |  | 
|  | 3406 | if (tracing_disabled) | 
|  | 3407 | return -ENODEV; | 
|  | 3408 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3409 | mutex_lock(&trace_types_lock); | 
|  | 3410 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3411 | /* create a buffer to store the information to pass to userspace */ | 
|  | 3412 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3413 | if (!iter) { | 
|  | 3414 | ret = -ENOMEM; | 
|  | 3415 | goto out; | 
|  | 3416 | } | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3417 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3418 | /* | 
|  | 3419 | * We make a copy of the current tracer to avoid concurrent | 
|  | 3420 | * changes to it while we are reading. | 
|  | 3421 | */ | 
|  | 3422 | iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); | 
|  | 3423 | if (!iter->trace) { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3424 | ret = -ENOMEM; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3425 | goto fail; | 
|  | 3426 | } | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 3427 | *iter->trace = *current_trace; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3428 |  | 
|  | 3429 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 
|  | 3430 | ret = -ENOMEM; | 
|  | 3431 | goto fail; | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 3432 | } | 
|  | 3433 |  | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3434 | /* trace pipe does not show start of buffer */ | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 3435 | cpumask_setall(iter->started); | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3436 |  | 
| Steven Rostedt | 112f38a7 | 2009-06-01 15:16:05 -0400 | [diff] [blame] | 3437 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | 
|  | 3438 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | 
|  | 3439 |  | 
| David Sharp | 8be0709 | 2012-11-13 12:18:22 -0800 | [diff] [blame] | 3440 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ | 
|  | 3441 | if (trace_clocks[trace_clock_id].in_ns) | 
|  | 3442 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | 
|  | 3443 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3444 | iter->cpu_file = cpu_file; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3445 | iter->tr = &global_trace; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3446 | mutex_init(&iter->mutex); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3447 | filp->private_data = iter; | 
|  | 3448 |  | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3449 | if (iter->trace->pipe_open) | 
|  | 3450 | iter->trace->pipe_open(iter); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3451 |  | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3452 | nonseekable_open(inode, filp); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3453 | out: | 
|  | 3454 | mutex_unlock(&trace_types_lock); | 
|  | 3455 | return ret; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3456 |  | 
|  | 3457 | fail: | 
|  | 3458 | kfree(iter->trace); | 
|  | 3459 | kfree(iter); | 
|  | 3460 | mutex_unlock(&trace_types_lock); | 
|  | 3461 | return ret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3462 | } | 
|  | 3463 |  | 
|  | 3464 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 
|  | 3465 | { | 
|  | 3466 | struct trace_iterator *iter = file->private_data; | 
|  | 3467 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3468 | mutex_lock(&trace_types_lock); | 
|  | 3469 |  | 
| Steven Rostedt | 29bf4a5 | 2009-12-09 12:37:43 -0500 | [diff] [blame] | 3470 | if (iter->trace->pipe_close) | 
| Steven Rostedt | c521efd | 2009-12-07 09:06:24 -0500 | [diff] [blame] | 3471 | iter->trace->pipe_close(iter); | 
|  | 3472 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3473 | mutex_unlock(&trace_types_lock); | 
|  | 3474 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 3475 | free_cpumask_var(iter->started); | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3476 | mutex_destroy(&iter->mutex); | 
|  | 3477 | kfree(iter->trace); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3478 | kfree(iter); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3479 |  | 
|  | 3480 | return 0; | 
|  | 3481 | } | 
|  | 3482 |  | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 3483 | static unsigned int | 
|  | 3484 | tracing_poll_pipe(struct file *filp, poll_table *poll_table) | 
|  | 3485 | { | 
|  | 3486 | struct trace_iterator *iter = filp->private_data; | 
|  | 3487 |  | 
|  | 3488 | if (trace_flags & TRACE_ITER_BLOCK) { | 
|  | 3489 | /* | 
|  | 3490 | * Always select as readable when in blocking mode | 
|  | 3491 | */ | 
|  | 3492 | return POLLIN | POLLRDNORM; | 
| Ingo Molnar | afc2abc | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3493 | } else { | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 3494 | if (!trace_empty(iter)) | 
|  | 3495 | return POLLIN | POLLRDNORM; | 
|  | 3496 | poll_wait(filp, &trace_wait, poll_table); | 
|  | 3497 | if (!trace_empty(iter)) | 
|  | 3498 | return POLLIN | POLLRDNORM; | 
|  | 3499 |  | 
|  | 3500 | return 0; | 
|  | 3501 | } | 
|  | 3502 | } | 
|  | 3503 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 3504 | /* | 
|  | 3505 | * This is a makeshift waitqueue. | 
|  | 3506 | * A tracer might use this callback in some rare cases: | 
|  | 3507 | * | 
|  | 3508 | *  1) the current tracer might hold the runqueue lock when it wakes up | 
|  | 3509 | *     a reader, hence a deadlock (sched, function, and function graph tracers) | 
|  | 3510 | *  2) the function tracers trace all functions, and we don't want | 
|  | 3511 | *     the overhead of calling wake_up and friends | 
|  | 3512 | *     (and of tracing them too) | 
|  | 3513 | * | 
|  | 3514 | *     Anyway, this is a really primitive wakeup. | 
|  | 3515 | */ | 
|  | 3516 | void poll_wait_pipe(struct trace_iterator *iter) | 
|  | 3517 | { | 
|  | 3518 | set_current_state(TASK_INTERRUPTIBLE); | 
|  | 3519 | /* sleep for 100 msecs, and try again. */ | 
|  | 3520 | schedule_timeout(HZ / 10); | 
|  | 3521 | } | 
|  | 3522 |  | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3523 | /* Must be called with trace_types_lock mutex held. */ | 
|  | 3524 | static int tracing_wait_pipe(struct file *filp) | 
|  | 3525 | { | 
|  | 3526 | struct trace_iterator *iter = filp->private_data; | 
|  | 3527 |  | 
|  | 3528 | while (trace_empty(iter)) { | 
|  | 3529 |  | 
|  | 3530 | if ((filp->f_flags & O_NONBLOCK)) { | 
|  | 3531 | return -EAGAIN; | 
|  | 3532 | } | 
|  | 3533 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3534 | mutex_unlock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3535 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 3536 | iter->trace->wait_pipe(iter); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3537 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3538 | mutex_lock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3539 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 3540 | if (signal_pending(current)) | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3541 | return -EINTR; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3542 |  | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3543 | /* | 
| Liu Bo | 250bfd3 | 2013-01-14 10:54:11 +0800 | [diff] [blame] | 3544 | * We block until we read something, or until tracing is disabled | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3545 | * after we have read something. We still block if tracing is | 
|  | 3546 | * disabled but we have never read anything. This allows a user to | 
|  | 3547 | * cat this file, and then enable tracing. But after we have read | 
|  | 3548 | * something, we give an EOF when tracing is again disabled. | 
|  | 3549 | * | 
|  | 3550 | * iter->pos will be 0 if we haven't read anything. | 
|  | 3551 | */ | 
| Liu Bo | 250bfd3 | 2013-01-14 10:54:11 +0800 | [diff] [blame] | 3552 | if (!tracing_is_enabled() && iter->pos) | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3553 | break; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3554 | } | 
|  | 3555 |  | 
|  | 3556 | return 1; | 
|  | 3557 | } | 
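|  |  |  | 
|  |  | /* | 
|  |  | * A positive return tells the caller that the buffer is worth | 
|  |  | * re-checking; -EAGAIN and -EINTR abort the read.  The blocking | 
|  |  | * behaviour is what allows a reader to be started before tracing is | 
|  |  | * enabled, e.g. (assuming debugfs is mounted at /sys/kernel/debug): | 
|  |  | * | 
|  |  | *	cat /sys/kernel/debug/tracing/trace_pipe & | 
|  |  | *	echo 1 > /sys/kernel/debug/tracing/tracing_on | 
|  |  | * | 
|  |  | * The cat simply sleeps via tracing_wait_pipe() until entries arrive. | 
|  |  | */ | 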
|  | 3558 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3559 | /* | 
|  | 3560 | * Consumer reader. | 
|  | 3561 | */ | 
|  | 3562 | static ssize_t | 
|  | 3563 | tracing_read_pipe(struct file *filp, char __user *ubuf, | 
|  | 3564 | size_t cnt, loff_t *ppos) | 
|  | 3565 | { | 
|  | 3566 | struct trace_iterator *iter = filp->private_data; | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3567 | ssize_t sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3568 |  | 
|  | 3569 | /* return any leftover data */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3570 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 
|  | 3571 | if (sret != -EBUSY) | 
|  | 3572 | return sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3573 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 3574 | trace_seq_init(&iter->seq); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3575 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3576 | /* copy the tracer to avoid using a global lock all around */ | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3577 | mutex_lock(&trace_types_lock); | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 3578 | if (unlikely(iter->trace->name != current_trace->name)) | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3579 | *iter->trace = *current_trace; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3580 | mutex_unlock(&trace_types_lock); | 
|  | 3581 |  | 
|  | 3582 | /* | 
|  | 3583 | * Avoid more than one consumer on a single file descriptor. | 
|  | 3584 | * This is just a matter of trace coherency: the ring buffer itself | 
|  | 3585 | * is protected. | 
|  | 3586 | */ | 
|  | 3587 | mutex_lock(&iter->mutex); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3588 | if (iter->trace->read) { | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3589 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | 
|  | 3590 | if (sret) | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3591 | goto out; | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3592 | } | 
|  | 3593 |  | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3594 | waitagain: | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3595 | sret = tracing_wait_pipe(filp); | 
|  | 3596 | if (sret <= 0) | 
|  | 3597 | goto out; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3598 |  | 
|  | 3599 | /* stop when tracing is finished */ | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3600 | if (trace_empty(iter)) { | 
|  | 3601 | sret = 0; | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3602 | goto out; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3603 | } | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3604 |  | 
|  | 3605 | if (cnt >= PAGE_SIZE) | 
|  | 3606 | cnt = PAGE_SIZE - 1; | 
|  | 3607 |  | 
| Steven Rostedt | 53d0aa7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3608 | /* reset all but tr, trace, and overruns */ | 
| Steven Rostedt | 53d0aa7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3609 | memset(&iter->seq, 0, | 
|  | 3610 | sizeof(struct trace_iterator) - | 
|  | 3611 | offsetof(struct trace_iterator, seq)); | 
| Steven Rostedt | 4823ed7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3612 | iter->pos = -1; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3613 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3614 | trace_event_read_lock(); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3615 | trace_access_lock(iter->cpu_file); | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 3616 | while (trace_find_next_entry_inc(iter) != NULL) { | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 3617 | enum print_line_t ret; | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 3618 | int len = iter->seq.len; | 
|  | 3619 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 3620 | ret = print_trace_line(iter); | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 3621 | if (ret == TRACE_TYPE_PARTIAL_LINE) { | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 3622 | /* don't print partial lines */ | 
|  | 3623 | iter->seq.len = len; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3624 | break; | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 3625 | } | 
| Frederic Weisbecker | b91facc | 2009-02-06 18:30:44 +0100 | [diff] [blame] | 3626 | if (ret != TRACE_TYPE_NO_CONSUME) | 
|  | 3627 | trace_consume(iter); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3628 |  | 
|  | 3629 | if (iter->seq.len >= cnt) | 
|  | 3630 | break; | 
| Jiri Olsa | ee5e51f | 2011-03-25 12:05:18 +0100 | [diff] [blame] | 3631 |  | 
|  | 3632 | /* | 
|  | 3633 | * Setting the full flag means we reached the trace_seq buffer | 
|  | 3634 | * size and should have left via the partial-output condition above. | 
|  | 3635 | * If we get here, one of the trace_seq_* functions is not being used properly. | 
|  | 3636 | */ | 
|  | 3637 | WARN_ONCE(iter->seq.full, "full flag set for trace type %d", | 
|  | 3638 | iter->ent->type); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3639 | } | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3640 | trace_access_unlock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3641 | trace_event_read_unlock(); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3642 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3643 | /* Now copy what we have to the user */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3644 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 
|  | 3645 | if (iter->seq.readpos >= iter->seq.len) | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 3646 | trace_seq_init(&iter->seq); | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3647 |  | 
|  | 3648 | /* | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3649 | * If there was nothing to send to the user, despite having consumed trace | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3650 | * entries, go back and wait for more entries. | 
|  | 3651 | */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3652 | if (sret == -EBUSY) | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3653 | goto waitagain; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3654 |  | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3655 | out: | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3656 | mutex_unlock(&iter->mutex); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3657 |  | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3658 | return sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3659 | } | 
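|  |  |  | 
|  |  | /* | 
|  |  | * Unlike the "trace" file, reads from trace_pipe consume what they | 
|  |  | * return (note the trace_consume() call above), so concurrent readers | 
|  |  | * of the pipe each see a different subset of the stream.  Assuming | 
|  |  | * debugfs is mounted at /sys/kernel/debug: | 
|  |  | * | 
|  |  | *	cat /sys/kernel/debug/tracing/trace         # non-consuming snapshot view | 
|  |  | *	cat /sys/kernel/debug/tracing/trace_pipe    # consuming, blocking reader | 
|  |  | */ | 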
|  | 3660 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3661 | static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, | 
|  | 3662 | struct pipe_buffer *buf) | 
|  | 3663 | { | 
|  | 3664 | __free_page(buf->page); | 
|  | 3665 | } | 
|  | 3666 |  | 
|  | 3667 | static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | 
|  | 3668 | unsigned int idx) | 
|  | 3669 | { | 
|  | 3670 | __free_page(spd->pages[idx]); | 
|  | 3671 | } | 
|  | 3672 |  | 
| Alexey Dobriyan | 28dfef8 | 2009-12-15 16:46:48 -0800 | [diff] [blame] | 3673 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3674 | .can_merge		= 0, | 
|  | 3675 | .map			= generic_pipe_buf_map, | 
|  | 3676 | .unmap			= generic_pipe_buf_unmap, | 
|  | 3677 | .confirm		= generic_pipe_buf_confirm, | 
|  | 3678 | .release		= tracing_pipe_buf_release, | 
|  | 3679 | .steal			= generic_pipe_buf_steal, | 
|  | 3680 | .get			= generic_pipe_buf_get, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3681 | }; | 
|  | 3682 |  | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3683 | static size_t | 
| Frederic Weisbecker | fa7c7f6 | 2009-02-11 02:51:30 +0100 | [diff] [blame] | 3684 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3685 | { | 
|  | 3686 | size_t count; | 
|  | 3687 | int ret; | 
|  | 3688 |  | 
|  | 3689 | /* Seq buffer is page-sized, exactly what we need. */ | 
|  | 3690 | for (;;) { | 
|  | 3691 | count = iter->seq.len; | 
|  | 3692 | ret = print_trace_line(iter); | 
|  | 3693 | count = iter->seq.len - count; | 
|  | 3694 | if (rem < count) { | 
|  | 3695 | rem = 0; | 
|  | 3696 | iter->seq.len -= count; | 
|  | 3697 | break; | 
|  | 3698 | } | 
|  | 3699 | if (ret == TRACE_TYPE_PARTIAL_LINE) { | 
|  | 3700 | iter->seq.len -= count; | 
|  | 3701 | break; | 
|  | 3702 | } | 
|  | 3703 |  | 
| Lai Jiangshan | 74e7ff8 | 2009-07-28 20:17:22 +0800 | [diff] [blame] | 3704 | if (ret != TRACE_TYPE_NO_CONSUME) | 
|  | 3705 | trace_consume(iter); | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3706 | rem -= count; | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 3707 | if (!trace_find_next_entry_inc(iter))	{ | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3708 | rem = 0; | 
|  | 3709 | iter->ent = NULL; | 
|  | 3710 | break; | 
|  | 3711 | } | 
|  | 3712 | } | 
|  | 3713 |  | 
|  | 3714 | return rem; | 
|  | 3715 | } | 
|  | 3716 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3717 | static ssize_t tracing_splice_read_pipe(struct file *filp, | 
|  | 3718 | loff_t *ppos, | 
|  | 3719 | struct pipe_inode_info *pipe, | 
|  | 3720 | size_t len, | 
|  | 3721 | unsigned int flags) | 
|  | 3722 | { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3723 | struct page *pages_def[PIPE_DEF_BUFFERS]; | 
|  | 3724 | struct partial_page partial_def[PIPE_DEF_BUFFERS]; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3725 | struct trace_iterator *iter = filp->private_data; | 
|  | 3726 | struct splice_pipe_desc spd = { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3727 | .pages		= pages_def, | 
|  | 3728 | .partial	= partial_def, | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3729 | .nr_pages	= 0, /* This gets updated below. */ | 
| Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 3730 | .nr_pages_max	= PIPE_DEF_BUFFERS, | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3731 | .flags		= flags, | 
|  | 3732 | .ops		= &tracing_pipe_buf_ops, | 
|  | 3733 | .spd_release	= tracing_spd_release_pipe, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3734 | }; | 
|  | 3735 | ssize_t ret; | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3736 | size_t rem; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3737 | unsigned int i; | 
|  | 3738 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3739 | if (splice_grow_spd(pipe, &spd)) | 
|  | 3740 | return -ENOMEM; | 
|  | 3741 |  | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3742 | /* copy the tracer to avoid using a global lock all around */ | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3743 | mutex_lock(&trace_types_lock); | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 3744 | if (unlikely(iter->trace->name != current_trace->name)) | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3745 | *iter->trace = *current_trace; | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3746 | mutex_unlock(&trace_types_lock); | 
|  | 3747 |  | 
|  | 3748 | mutex_lock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3749 |  | 
|  | 3750 | if (iter->trace->splice_read) { | 
|  | 3751 | ret = iter->trace->splice_read(iter, filp, | 
|  | 3752 | ppos, pipe, len, flags); | 
|  | 3753 | if (ret) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3754 | goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3755 | } | 
|  | 3756 |  | 
|  | 3757 | ret = tracing_wait_pipe(filp); | 
|  | 3758 | if (ret <= 0) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3759 | goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3760 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 3761 | if (!iter->ent && !trace_find_next_entry_inc(iter)) { | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3762 | ret = -EFAULT; | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3763 | goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3764 | } | 
|  | 3765 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3766 | trace_event_read_lock(); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3767 | trace_access_lock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3768 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3769 | /* Fill as many pages as possible. */ | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3770 | for (i = 0, rem = len; i < pipe->buffers && rem; i++) { | 
|  | 3771 | spd.pages[i] = alloc_page(GFP_KERNEL); | 
|  | 3772 | if (!spd.pages[i]) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3773 | break; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3774 |  | 
| Frederic Weisbecker | fa7c7f6 | 2009-02-11 02:51:30 +0100 | [diff] [blame] | 3775 | rem = tracing_fill_pipe_page(rem, iter); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3776 |  | 
|  | 3777 | /* Copy the data into the page, so we can start over. */ | 
|  | 3778 | ret = trace_seq_to_buffer(&iter->seq, | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3779 | page_address(spd.pages[i]), | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3780 | iter->seq.len); | 
|  | 3781 | if (ret < 0) { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3782 | __free_page(spd.pages[i]); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3783 | break; | 
|  | 3784 | } | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3785 | spd.partial[i].offset = 0; | 
|  | 3786 | spd.partial[i].len = iter->seq.len; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3787 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 3788 | trace_seq_init(&iter->seq); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3789 | } | 
|  | 3790 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3791 | trace_access_unlock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3792 | trace_event_read_unlock(); | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3793 | mutex_unlock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3794 |  | 
|  | 3795 | spd.nr_pages = i; | 
|  | 3796 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3797 | ret = splice_to_pipe(pipe, &spd); | 
|  | 3798 | out: | 
| Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 3799 | splice_shrink_spd(&spd); | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3800 | return ret; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3801 |  | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3802 | out_err: | 
| Frederic Weisbecker | d7350c3f | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3803 | mutex_unlock(&iter->mutex); | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3804 | goto out; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3805 | } | 
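|  |  |  | 
|  |  | /* | 
|  |  | * The function above backs splice() on trace_pipe (wired up as | 
|  |  | * .splice_read in tracing_pipe_fops below).  Each pipe buffer is a | 
|  |  | * freshly allocated page filled with formatted text by | 
|  |  | * tracing_fill_pipe_page(), so rendered trace text can be spliced | 
|  |  | * into a pipe page by page.  The raw, binary, page-for-page variant | 
|  |  | * is tracing_buffers_splice_read() further down. | 
|  |  | */ | 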
|  | 3806 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3807 | struct ftrace_entries_info { | 
|  | 3808 | struct trace_array	*tr; | 
|  | 3809 | int			cpu; | 
|  | 3810 | }; | 
|  | 3811 |  | 
|  | 3812 | static int tracing_entries_open(struct inode *inode, struct file *filp) | 
|  | 3813 | { | 
|  | 3814 | struct ftrace_entries_info *info; | 
|  | 3815 |  | 
|  | 3816 | if (tracing_disabled) | 
|  | 3817 | return -ENODEV; | 
|  | 3818 |  | 
|  | 3819 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 
|  | 3820 | if (!info) | 
|  | 3821 | return -ENOMEM; | 
|  | 3822 |  | 
|  | 3823 | info->tr = &global_trace; | 
|  | 3824 | info->cpu = (unsigned long)inode->i_private; | 
|  | 3825 |  | 
|  | 3826 | filp->private_data = info; | 
|  | 3827 |  | 
|  | 3828 | return 0; | 
|  | 3829 | } | 
|  | 3830 |  | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3831 | static ssize_t | 
|  | 3832 | tracing_entries_read(struct file *filp, char __user *ubuf, | 
|  | 3833 | size_t cnt, loff_t *ppos) | 
|  | 3834 | { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3835 | struct ftrace_entries_info *info = filp->private_data; | 
|  | 3836 | struct trace_array *tr = info->tr; | 
|  | 3837 | char buf[64]; | 
|  | 3838 | int r = 0; | 
|  | 3839 | ssize_t ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3840 |  | 
| Steven Rostedt | db526ca | 2009-03-12 13:53:25 -0400 | [diff] [blame] | 3841 | mutex_lock(&trace_types_lock); | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3842 |  | 
|  | 3843 | if (info->cpu == RING_BUFFER_ALL_CPUS) { | 
|  | 3844 | int cpu, buf_size_same; | 
|  | 3845 | unsigned long size; | 
|  | 3846 |  | 
|  | 3847 | size = 0; | 
|  | 3848 | buf_size_same = 1; | 
|  | 3849 | /* check if all per-cpu buffer sizes are the same */ | 
|  | 3850 | for_each_tracing_cpu(cpu) { | 
|  | 3851 | /* fill in the size from the first enabled cpu */ | 
|  | 3852 | if (size == 0) | 
|  | 3853 | size = tr->data[cpu]->entries; | 
|  | 3854 | if (size != tr->data[cpu]->entries) { | 
|  | 3855 | buf_size_same = 0; | 
|  | 3856 | break; | 
|  | 3857 | } | 
|  | 3858 | } | 
|  | 3859 |  | 
|  | 3860 | if (buf_size_same) { | 
|  | 3861 | if (!ring_buffer_expanded) | 
|  | 3862 | r = sprintf(buf, "%lu (expanded: %lu)\n", | 
|  | 3863 | size >> 10, | 
|  | 3864 | trace_buf_size >> 10); | 
|  | 3865 | else | 
|  | 3866 | r = sprintf(buf, "%lu\n", size >> 10); | 
|  | 3867 | } else | 
|  | 3868 | r = sprintf(buf, "X\n"); | 
|  | 3869 | } else | 
|  | 3870 | r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10); | 
|  | 3871 |  | 
| Steven Rostedt | db526ca | 2009-03-12 13:53:25 -0400 | [diff] [blame] | 3872 | mutex_unlock(&trace_types_lock); | 
|  | 3873 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3874 | ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
|  | 3875 | return ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3876 | } | 
|  | 3877 |  | 
|  | 3878 | static ssize_t | 
|  | 3879 | tracing_entries_write(struct file *filp, const char __user *ubuf, | 
|  | 3880 | size_t cnt, loff_t *ppos) | 
|  | 3881 | { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3882 | struct ftrace_entries_info *info = filp->private_data; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3883 | unsigned long val; | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3884 | int ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3885 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 3886 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
|  | 3887 | if (ret) | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3888 | return ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3889 |  | 
|  | 3890 | /* must have at least 1 entry */ | 
|  | 3891 | if (!val) | 
|  | 3892 | return -EINVAL; | 
|  | 3893 |  | 
| Steven Rostedt | 1696b2b | 2008-11-13 00:09:35 -0500 | [diff] [blame] | 3894 | /* value is in KB */ | 
|  | 3895 | val <<= 10; | 
|  | 3896 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3897 | ret = tracing_resize_ring_buffer(val, info->cpu); | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3898 | if (ret < 0) | 
|  | 3899 | return ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3900 |  | 
| Jiri Olsa | cf8517c | 2009-10-23 19:36:16 -0400 | [diff] [blame] | 3901 | *ppos += cnt; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3902 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3903 | return cnt; | 
|  | 3904 | } | 
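|  |  |  | 
|  |  | /* | 
|  |  | * Example buffer_size_kb usage, assuming debugfs is mounted at | 
|  |  | * /sys/kernel/debug: | 
|  |  | * | 
|  |  | *	cat /sys/kernel/debug/tracing/buffer_size_kb | 
|  |  | *	echo 2048 > /sys/kernel/debug/tracing/buffer_size_kb | 
|  |  | * | 
|  |  | * The write resizes each per-cpu ring buffer to 2 MB (the value is in | 
|  |  | * KB, hence the val <<= 10 above).  Before the buffers have been | 
|  |  | * expanded for the first time, the read shows the current size | 
|  |  | * together with the configured size as "<size> (expanded: <size>)". | 
|  |  | */ | 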
| Steven Rostedt | bf5e651 | 2008-11-10 21:46:00 -0500 | [diff] [blame] | 3905 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3906 | static int | 
|  | 3907 | tracing_entries_release(struct inode *inode, struct file *filp) | 
|  | 3908 | { | 
|  | 3909 | struct ftrace_entries_info *info = filp->private_data; | 
|  | 3910 |  | 
|  | 3911 | kfree(info); | 
|  | 3912 |  | 
|  | 3913 | return 0; | 
|  | 3914 | } | 
|  | 3915 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3916 | static ssize_t | 
| Vaibhav Nagarnaik | f81ab07 | 2011-08-16 14:46:15 -0700 | [diff] [blame] | 3917 | tracing_total_entries_read(struct file *filp, char __user *ubuf, | 
|  | 3918 | size_t cnt, loff_t *ppos) | 
|  | 3919 | { | 
|  | 3920 | struct trace_array *tr = filp->private_data; | 
|  | 3921 | char buf[64]; | 
|  | 3922 | int r, cpu; | 
|  | 3923 | unsigned long size = 0, expanded_size = 0; | 
|  | 3924 |  | 
|  | 3925 | mutex_lock(&trace_types_lock); | 
|  | 3926 | for_each_tracing_cpu(cpu) { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3927 | size += tr->data[cpu]->entries >> 10; | 
| Vaibhav Nagarnaik | f81ab07 | 2011-08-16 14:46:15 -0700 | [diff] [blame] | 3928 | if (!ring_buffer_expanded) | 
|  | 3929 | expanded_size += trace_buf_size >> 10; | 
|  | 3930 | } | 
|  | 3931 | if (ring_buffer_expanded) | 
|  | 3932 | r = sprintf(buf, "%lu\n", size); | 
|  | 3933 | else | 
|  | 3934 | r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); | 
|  | 3935 | mutex_unlock(&trace_types_lock); | 
|  | 3936 |  | 
|  | 3937 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
|  | 3938 | } | 
|  | 3939 |  | 
|  | 3940 | static ssize_t | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3941 | tracing_free_buffer_write(struct file *filp, const char __user *ubuf, | 
|  | 3942 | size_t cnt, loff_t *ppos) | 
|  | 3943 | { | 
|  | 3944 | /* | 
|  | 3945 | * There is no need to read what the user has written; this function | 
|  | 3946 | * exists only so that "echo" into this file does not return an error. | 
|  | 3947 | */ | 
|  | 3948 |  | 
|  | 3949 | *ppos += cnt; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3950 |  | 
|  | 3951 | return cnt; | 
|  | 3952 | } | 
|  | 3953 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3954 | static int | 
|  | 3955 | tracing_free_buffer_release(struct inode *inode, struct file *filp) | 
|  | 3956 | { | 
| Steven Rostedt | cf30cf6 | 2011-06-14 22:44:07 -0400 | [diff] [blame] | 3957 | /* Disable tracing? */ | 
|  | 3958 | if (trace_flags & TRACE_ITER_STOP_ON_FREE) | 
|  | 3959 | tracing_off(); | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3960 | /* resize the ring buffer to 0 */ | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3961 | tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS); | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3962 |  | 
|  | 3963 | return 0; | 
|  | 3964 | } | 
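|  |  |  | 
|  |  | /* | 
|  |  | * The free_buffer file releases the ring buffer memory on close: the | 
|  |  | * write above is accepted but ignored (so a plain "echo" works), and | 
|  |  | * the release callback shrinks the buffer to zero, turning tracing | 
|  |  | * off first when TRACE_ITER_STOP_ON_FREE is set.  Assuming debugfs is | 
|  |  | * mounted at /sys/kernel/debug: | 
|  |  | * | 
|  |  | *	echo > /sys/kernel/debug/tracing/free_buffer | 
|  |  | */ | 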
|  | 3965 |  | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3966 | static ssize_t | 
|  | 3967 | tracing_mark_write(struct file *filp, const char __user *ubuf, | 
|  | 3968 | size_t cnt, loff_t *fpos) | 
|  | 3969 | { | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3970 | unsigned long addr = (unsigned long)ubuf; | 
|  | 3971 | struct ring_buffer_event *event; | 
|  | 3972 | struct ring_buffer *buffer; | 
|  | 3973 | struct print_entry *entry; | 
|  | 3974 | unsigned long irq_flags; | 
|  | 3975 | struct page *pages[2]; | 
| Steven Rostedt | 6edb2a8 | 2012-05-11 23:28:49 -0400 | [diff] [blame] | 3976 | void *map_page[2]; | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3977 | int nr_pages = 1; | 
|  | 3978 | ssize_t written; | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3979 | int offset; | 
|  | 3980 | int size; | 
|  | 3981 | int len; | 
|  | 3982 | int ret; | 
| Steven Rostedt | 6edb2a8 | 2012-05-11 23:28:49 -0400 | [diff] [blame] | 3983 | int i; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3984 |  | 
| Steven Rostedt | c76f069 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3985 | if (tracing_disabled) | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3986 | return -EINVAL; | 
|  | 3987 |  | 
| Mandeep Singh Baines | 5224c3a | 2012-09-07 18:12:19 -0700 | [diff] [blame] | 3988 | if (!(trace_flags & TRACE_ITER_MARKERS)) | 
|  | 3989 | return -EINVAL; | 
|  | 3990 |  | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3991 | if (cnt > TRACE_BUF_SIZE) | 
|  | 3992 | cnt = TRACE_BUF_SIZE; | 
|  | 3993 |  | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3994 | /* | 
|  | 3995 | * Userspace is injecting traces into the kernel trace buffer. | 
|  | 3996 | * We want to be as non-intrusive as possible. | 
|  | 3997 | * To do so, we do not want to allocate any special buffers | 
|  | 3998 | * or take any locks, but instead write the userspace data | 
|  | 3999 | * straight into the ring buffer. | 
|  | 4000 | * | 
|  | 4001 | * First we need to pin the userspace buffer into memory. It most | 
|  | 4002 | * likely already is, because the process just referenced it, but | 
|  | 4003 | * there is no guarantee. By using get_user_pages_fast() | 
|  | 4004 | * and kmap_atomic/kunmap_atomic() we can get access to the | 
|  | 4005 | * pages directly. We then write the data directly into the | 
|  | 4006 | * ring buffer. | 
|  | 4007 | */ | 
|  | 4008 | BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 4009 |  | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 4010 | /* check if we cross pages */ | 
|  | 4011 | if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) | 
|  | 4012 | nr_pages = 2; | 
|  | 4013 |  | 
|  | 4014 | offset = addr & (PAGE_SIZE - 1); | 
|  | 4015 | addr &= PAGE_MASK; | 
|  | 4016 |  | 
|  | 4017 | ret = get_user_pages_fast(addr, nr_pages, 0, pages); | 
|  | 4018 | if (ret < nr_pages) { | 
|  | 4019 | while (--ret >= 0) | 
|  | 4020 | put_page(pages[ret]); | 
|  | 4021 | written = -EFAULT; | 
|  | 4022 | goto out; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 4023 | } | 
|  | 4024 |  | 
| Steven Rostedt | 6edb2a8 | 2012-05-11 23:28:49 -0400 | [diff] [blame] | 4025 | for (i = 0; i < nr_pages; i++) | 
|  | 4026 | map_page[i] = kmap_atomic(pages[i]); | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 4027 |  | 
|  | 4028 | local_save_flags(irq_flags); | 
|  | 4029 | size = sizeof(*entry) + cnt + 2; /* possible \n added */ | 
|  | 4030 | buffer = global_trace.buffer; | 
|  | 4031 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 
|  | 4032 | irq_flags, preempt_count()); | 
|  | 4033 | if (!event) { | 
|  | 4034 | /* Ring buffer disabled, return as if not open for write */ | 
|  | 4035 | written = -EBADF; | 
|  | 4036 | goto out_unlock; | 
|  | 4037 | } | 
|  | 4038 |  | 
|  | 4039 | entry = ring_buffer_event_data(event); | 
|  | 4040 | entry->ip = _THIS_IP_; | 
|  | 4041 |  | 
|  | 4042 | if (nr_pages == 2) { | 
|  | 4043 | len = PAGE_SIZE - offset; | 
| Steven Rostedt | 6edb2a8 | 2012-05-11 23:28:49 -0400 | [diff] [blame] | 4044 | memcpy(&entry->buf, map_page[0] + offset, len); | 
|  | 4045 | memcpy(&entry->buf[len], map_page[1], cnt - len); | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 4046 | } else | 
| Steven Rostedt | 6edb2a8 | 2012-05-11 23:28:49 -0400 | [diff] [blame] | 4047 | memcpy(&entry->buf, map_page[0] + offset, cnt); | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 4048 |  | 
|  | 4049 | if (entry->buf[cnt - 1] != '\n') { | 
|  | 4050 | entry->buf[cnt] = '\n'; | 
|  | 4051 | entry->buf[cnt + 1] = '\0'; | 
|  | 4052 | } else | 
|  | 4053 | entry->buf[cnt] = '\0'; | 
|  | 4054 |  | 
| Steven Rostedt | 7ffbd48 | 2012-10-11 12:14:25 -0400 | [diff] [blame] | 4055 | __buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 4056 |  | 
|  | 4057 | written = cnt; | 
|  | 4058 |  | 
| Marcin Slusarz | 1aa54bc | 2010-07-28 01:18:01 +0200 | [diff] [blame] | 4059 | *fpos += written; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 4060 |  | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 4061 | out_unlock: | 
| Steven Rostedt | 6edb2a8 | 2012-05-11 23:28:49 -0400 | [diff] [blame] | 4062 | for (i = 0; i < nr_pages; i++){ | 
|  | 4063 | kunmap_atomic(map_page[i]); | 
|  | 4064 | put_page(pages[i]); | 
|  | 4065 | } | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 4066 | out: | 
| Marcin Slusarz | 1aa54bc | 2010-07-28 01:18:01 +0200 | [diff] [blame] | 4067 | return written; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 4068 | } | 
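|  |  |  | 
|  |  | /* | 
|  |  | * Example trace_marker usage, assuming debugfs is mounted at | 
|  |  | * /sys/kernel/debug (writes are rejected with -EINVAL while | 
|  |  | * TRACE_ITER_MARKERS is clear): | 
|  |  | * | 
|  |  | *	echo "hit the interesting spot" > /sys/kernel/debug/tracing/trace_marker | 
|  |  | * | 
|  |  | * The string (at most TRACE_BUF_SIZE bytes, with a newline appended if | 
|  |  | * missing) is written straight into the ring buffer as a TRACE_PRINT | 
|  |  | * entry and shows up interleaved with the other trace output. | 
|  |  | */ | 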
|  | 4069 |  | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 4070 | static int tracing_clock_show(struct seq_file *m, void *v) | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4071 | { | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4072 | int i; | 
|  | 4073 |  | 
|  | 4074 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 4075 | seq_printf(m, | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4076 | "%s%s%s%s", i ? " " : "", | 
|  | 4077 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, | 
|  | 4078 | i == trace_clock_id ? "]" : ""); | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 4079 | seq_putc(m, '\n'); | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4080 |  | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 4081 | return 0; | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4082 | } | 
|  | 4083 |  | 
|  | 4084 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 
|  | 4085 | size_t cnt, loff_t *fpos) | 
|  | 4086 | { | 
|  | 4087 | char buf[64]; | 
|  | 4088 | const char *clockstr; | 
|  | 4089 | int i; | 
|  | 4090 |  | 
|  | 4091 | if (cnt >= sizeof(buf)) | 
|  | 4092 | return -EINVAL; | 
|  | 4093 |  | 
|  | 4094 | if (copy_from_user(&buf, ubuf, cnt)) | 
|  | 4095 | return -EFAULT; | 
|  | 4096 |  | 
|  | 4097 | buf[cnt] = 0; | 
|  | 4098 |  | 
|  | 4099 | clockstr = strstrip(buf); | 
|  | 4100 |  | 
|  | 4101 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { | 
|  | 4102 | if (strcmp(trace_clocks[i].name, clockstr) == 0) | 
|  | 4103 | break; | 
|  | 4104 | } | 
|  | 4105 | if (i == ARRAY_SIZE(trace_clocks)) | 
|  | 4106 | return -EINVAL; | 
|  | 4107 |  | 
|  | 4108 | trace_clock_id = i; | 
|  | 4109 |  | 
|  | 4110 | mutex_lock(&trace_types_lock); | 
|  | 4111 |  | 
|  | 4112 | ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); | 
|  | 4113 | if (max_tr.buffer) | 
|  | 4114 | ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); | 
|  | 4115 |  | 
| David Sharp | 60303ed | 2012-10-11 16:27:52 -0700 | [diff] [blame] | 4116 | /* | 
|  | 4117 | * The new clock may not be consistent with the previous clock, so | 
|  | 4118 | * reset the buffers so that they don't contain incomparable timestamps. | 
|  | 4119 | */ | 
|  | 4120 | tracing_reset_online_cpus(&global_trace); | 
| Steven Rostedt | 84c6cf0 | 2012-12-20 21:43:52 -0500 | [diff] [blame] | 4121 | tracing_reset_online_cpus(&max_tr); | 
| David Sharp | 60303ed | 2012-10-11 16:27:52 -0700 | [diff] [blame] | 4122 |  | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4123 | mutex_unlock(&trace_types_lock); | 
|  | 4124 |  | 
|  | 4125 | *fpos += cnt; | 
|  | 4126 |  | 
|  | 4127 | return cnt; | 
|  | 4128 | } | 
|  | 4129 |  | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 4130 | static int tracing_clock_open(struct inode *inode, struct file *file) | 
|  | 4131 | { | 
|  | 4132 | if (tracing_disabled) | 
|  | 4133 | return -ENODEV; | 
|  | 4134 | return single_open(file, tracing_clock_show, NULL); | 
|  | 4135 | } | 
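|  |  |  | 
|  |  | /* | 
|  |  | * Example trace_clock usage, assuming debugfs is mounted at | 
|  |  | * /sys/kernel/debug: | 
|  |  | * | 
|  |  | *	cat /sys/kernel/debug/tracing/trace_clock | 
|  |  | *	echo global > /sys/kernel/debug/tracing/trace_clock | 
|  |  | * | 
|  |  | * The read lists the available clocks with the current one in square | 
|  |  | * brackets (see tracing_clock_show() above); the write selects a new | 
|  |  | * clock and resets both the main and max-latency buffers, since | 
|  |  | * timestamps from different clocks are not comparable. | 
|  |  | */ | 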
|  | 4136 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 4137 | #ifdef CONFIG_TRACER_SNAPSHOT | 
|  | 4138 | static int tracing_snapshot_open(struct inode *inode, struct file *file) | 
|  | 4139 | { | 
|  | 4140 | struct trace_iterator *iter; | 
|  | 4141 | int ret = 0; | 
|  | 4142 |  | 
|  | 4143 | if (file->f_mode & FMODE_READ) { | 
|  | 4144 | iter = __tracing_open(inode, file, true); | 
|  | 4145 | if (IS_ERR(iter)) | 
|  | 4146 | ret = PTR_ERR(iter); | 
|  | 4147 | } | 
|  | 4148 | return ret; | 
|  | 4149 | } | 
|  | 4150 |  | 
|  | 4151 | static ssize_t | 
|  | 4152 | tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, | 
|  | 4153 | loff_t *ppos) | 
|  | 4154 | { | 
|  | 4155 | unsigned long val; | 
|  | 4156 | int ret; | 
|  | 4157 |  | 
|  | 4158 | ret = tracing_update_buffers(); | 
|  | 4159 | if (ret < 0) | 
|  | 4160 | return ret; | 
|  | 4161 |  | 
|  | 4162 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
|  | 4163 | if (ret) | 
|  | 4164 | return ret; | 
|  | 4165 |  | 
|  | 4166 | mutex_lock(&trace_types_lock); | 
|  | 4167 |  | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 4168 | if (current_trace->use_max_tr) { | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 4169 | ret = -EBUSY; | 
|  | 4170 | goto out; | 
|  | 4171 | } | 
|  | 4172 |  | 
|  | 4173 | switch (val) { | 
|  | 4174 | case 0: | 
|  | 4175 | if (current_trace->allocated_snapshot) { | 
|  | 4176 | /* free spare buffer */ | 
|  | 4177 | ring_buffer_resize(max_tr.buffer, 1, | 
|  | 4178 | RING_BUFFER_ALL_CPUS); | 
|  | 4179 | set_buffer_entries(&max_tr, 1); | 
|  | 4180 | tracing_reset_online_cpus(&max_tr); | 
|  | 4181 | current_trace->allocated_snapshot = false; | 
|  | 4182 | } | 
|  | 4183 | break; | 
|  | 4184 | case 1: | 
|  | 4185 | if (!current_trace->allocated_snapshot) { | 
|  | 4186 | /* allocate spare buffer */ | 
|  | 4187 | ret = resize_buffer_duplicate_size(&max_tr, | 
|  | 4188 | &global_trace, RING_BUFFER_ALL_CPUS); | 
|  | 4189 | if (ret < 0) | 
|  | 4190 | break; | 
|  | 4191 | current_trace->allocated_snapshot = true; | 
|  | 4192 | } | 
|  | 4193 |  | 
|  | 4194 | local_irq_disable(); | 
|  | 4195 | /* Now, we're going to swap */ | 
|  | 4196 | update_max_tr(&global_trace, current, smp_processor_id()); | 
|  | 4197 | local_irq_enable(); | 
|  | 4198 | break; | 
|  | 4199 | default: | 
|  | 4200 | if (current_trace->allocated_snapshot) | 
|  | 4201 | tracing_reset_online_cpus(&max_tr); | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 4202 | break; | 
|  | 4203 | } | 
|  | 4204 |  | 
|  | 4205 | if (ret >= 0) { | 
|  | 4206 | *ppos += cnt; | 
|  | 4207 | ret = cnt; | 
|  | 4208 | } | 
|  | 4209 | out: | 
|  | 4210 | mutex_unlock(&trace_types_lock); | 
|  | 4211 | return ret; | 
|  | 4212 | } | 
|  | 4213 | #endif /* CONFIG_TRACER_SNAPSHOT */ | 
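|  |  |  | 
|  |  | /* | 
|  |  | * With CONFIG_TRACER_SNAPSHOT, the "snapshot" file drives the spare | 
|  |  | * buffer handled above.  Assuming debugfs is mounted at | 
|  |  | * /sys/kernel/debug: | 
|  |  | * | 
|  |  | *	echo 1 > /sys/kernel/debug/tracing/snapshot   # allocate if needed and take a snapshot | 
|  |  | *	cat /sys/kernel/debug/tracing/snapshot        # read the snapshotted trace | 
|  |  | *	echo 0 > /sys/kernel/debug/tracing/snapshot   # free the spare buffer | 
|  |  | * | 
|  |  | * Any other value clears the existing snapshot without freeing the | 
|  |  | * spare buffer, as the switch statement above shows. | 
|  |  | */ | 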
|  | 4214 |  | 
|  | 4215 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 4216 | static const struct file_operations tracing_max_lat_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4217 | .open		= tracing_open_generic, | 
|  | 4218 | .read		= tracing_max_lat_read, | 
|  | 4219 | .write		= tracing_max_lat_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4220 | .llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4221 | }; | 
|  | 4222 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 4223 | static const struct file_operations set_tracer_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4224 | .open		= tracing_open_generic, | 
|  | 4225 | .read		= tracing_set_trace_read, | 
|  | 4226 | .write		= tracing_set_trace_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4227 | .llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4228 | }; | 
|  | 4229 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 4230 | static const struct file_operations tracing_pipe_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4231 | .open		= tracing_open_pipe, | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 4232 | .poll		= tracing_poll_pipe, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4233 | .read		= tracing_read_pipe, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 4234 | .splice_read	= tracing_splice_read_pipe, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4235 | .release	= tracing_release_pipe, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4236 | .llseek		= no_llseek, | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4237 | }; | 
|  | 4238 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 4239 | static const struct file_operations tracing_entries_fops = { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4240 | .open		= tracing_entries_open, | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 4241 | .read		= tracing_entries_read, | 
|  | 4242 | .write		= tracing_entries_write, | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4243 | .release	= tracing_entries_release, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4244 | .llseek		= generic_file_llseek, | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 4245 | }; | 
|  | 4246 |  | 
| Vaibhav Nagarnaik | f81ab07 | 2011-08-16 14:46:15 -0700 | [diff] [blame] | 4247 | static const struct file_operations tracing_total_entries_fops = { | 
|  | 4248 | .open		= tracing_open_generic, | 
|  | 4249 | .read		= tracing_total_entries_read, | 
|  | 4250 | .llseek		= generic_file_llseek, | 
|  | 4251 | }; | 
|  | 4252 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 4253 | static const struct file_operations tracing_free_buffer_fops = { | 
|  | 4254 | .write		= tracing_free_buffer_write, | 
|  | 4255 | .release	= tracing_free_buffer_release, | 
|  | 4256 | }; | 
|  | 4257 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 4258 | static const struct file_operations tracing_mark_fops = { | 
| Frédéric Weisbecker | 43a1538 | 2008-09-21 20:16:30 +0200 | [diff] [blame] | 4259 | .open		= tracing_open_generic, | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 4260 | .write		= tracing_mark_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4261 | .llseek		= generic_file_llseek, | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 4262 | }; | 
|  | 4263 |  | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4264 | static const struct file_operations trace_clock_fops = { | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 4265 | .open		= tracing_clock_open, | 
|  | 4266 | .read		= seq_read, | 
|  | 4267 | .llseek		= seq_lseek, | 
|  | 4268 | .release	= single_release, | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4269 | .write		= tracing_clock_write, | 
|  | 4270 | }; | 
|  | 4271 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 4272 | #ifdef CONFIG_TRACER_SNAPSHOT | 
|  | 4273 | static const struct file_operations snapshot_fops = { | 
|  | 4274 | .open		= tracing_snapshot_open, | 
|  | 4275 | .read		= seq_read, | 
|  | 4276 | .write		= tracing_snapshot_write, | 
|  | 4277 | .llseek		= tracing_seek, | 
|  | 4278 | .release	= tracing_release, | 
|  | 4279 | }; | 
|  | 4280 | #endif /* CONFIG_TRACER_SNAPSHOT */ | 
|  | 4281 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4282 | struct ftrace_buffer_info { | 
|  | 4283 | struct trace_array	*tr; | 
|  | 4284 | void			*spare; | 
|  | 4285 | int			cpu; | 
|  | 4286 | unsigned int		read; | 
|  | 4287 | }; | 
|  | 4288 |  | 
|  | 4289 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | 
|  | 4290 | { | 
|  | 4291 | int cpu = (int)(long)inode->i_private; | 
|  | 4292 | struct ftrace_buffer_info *info; | 
|  | 4293 |  | 
|  | 4294 | if (tracing_disabled) | 
|  | 4295 | return -ENODEV; | 
|  | 4296 |  | 
|  | 4297 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 
|  | 4298 | if (!info) | 
|  | 4299 | return -ENOMEM; | 
|  | 4300 |  | 
|  | 4301 | info->tr	= &global_trace; | 
|  | 4302 | info->cpu	= cpu; | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 4303 | info->spare	= NULL; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4304 | /* Force reading ring buffer for first read */ | 
|  | 4305 | info->read	= (unsigned int)-1; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4306 |  | 
|  | 4307 | filp->private_data = info; | 
|  | 4308 |  | 
| Lai Jiangshan | d1e7e02 | 2009-04-02 15:16:56 +0800 | [diff] [blame] | 4309 | return nonseekable_open(inode, filp); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4310 | } | 
|  | 4311 |  | 
|  | 4312 | static ssize_t | 
|  | 4313 | tracing_buffers_read(struct file *filp, char __user *ubuf, | 
|  | 4314 | size_t count, loff_t *ppos) | 
|  | 4315 | { | 
|  | 4316 | struct ftrace_buffer_info *info = filp->private_data; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4317 | ssize_t ret; | 
|  | 4318 | size_t size; | 
|  | 4319 |  | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 4320 | if (!count) | 
|  | 4321 | return 0; | 
|  | 4322 |  | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 4323 | if (!info->spare) | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4324 | info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu); | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 4325 | if (!info->spare) | 
|  | 4326 | return -ENOMEM; | 
|  | 4327 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4328 | /* Is there data left over from a previous read? */ | 
|  | 4329 | if (info->read < PAGE_SIZE) | 
|  | 4330 | goto read; | 
|  | 4331 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 4332 | trace_access_lock(info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4333 | ret = ring_buffer_read_page(info->tr->buffer, | 
|  | 4334 | &info->spare, | 
|  | 4335 | count, | 
|  | 4336 | info->cpu, 0); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 4337 | trace_access_unlock(info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4338 | if (ret < 0) | 
|  | 4339 | return 0; | 
|  | 4340 |  | 
| Steven Rostedt | 436fc28 | 2011-10-14 10:44:25 -0400 | [diff] [blame] | 4341 | info->read = 0; | 
|  | 4342 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4343 | read: | 
|  | 4344 | size = PAGE_SIZE - info->read; | 
|  | 4345 | if (size > count) | 
|  | 4346 | size = count; | 
|  | 4347 |  | 
|  | 4348 | ret = copy_to_user(ubuf, info->spare + info->read, size); | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 4349 | if (ret == size) | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4350 | return -EFAULT; | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 4351 | size -= ret; | 
|  | 4352 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4353 | *ppos += size; | 
|  | 4354 | info->read += size; | 
|  | 4355 |  | 
|  | 4356 | return size; | 
|  | 4357 | } | 
|  | 4358 |  | 
|  | 4359 | static int tracing_buffers_release(struct inode *inode, struct file *file) | 
|  | 4360 | { | 
|  | 4361 | struct ftrace_buffer_info *info = file->private_data; | 
|  | 4362 |  | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 4363 | if (info->spare) | 
|  | 4364 | ring_buffer_free_read_page(info->tr->buffer, info->spare); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4365 | kfree(info); | 
|  | 4366 |  | 
|  | 4367 | return 0; | 
|  | 4368 | } | 
|  | 4369 |  | 
|  | 4370 | struct buffer_ref { | 
|  | 4371 | struct ring_buffer	*buffer; | 
|  | 4372 | void			*page; | 
|  | 4373 | int			ref; | 
|  | 4374 | }; | 
|  | 4375 |  | 
|  | 4376 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | 
|  | 4377 | struct pipe_buffer *buf) | 
|  | 4378 | { | 
|  | 4379 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 
|  | 4380 |  | 
|  | 4381 | if (--ref->ref) | 
|  | 4382 | return; | 
|  | 4383 |  | 
|  | 4384 | ring_buffer_free_read_page(ref->buffer, ref->page); | 
|  | 4385 | kfree(ref); | 
|  | 4386 | buf->private = 0; | 
|  | 4387 | } | 
|  | 4388 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4389 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | 
|  | 4390 | struct pipe_buffer *buf) | 
|  | 4391 | { | 
|  | 4392 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 
|  | 4393 |  | 
|  | 4394 | ref->ref++; | 
|  | 4395 | } | 
|  | 4396 |  | 
|  | 4397 | /* Pipe buffer operations for a buffer. */ | 
| Alexey Dobriyan | 28dfef8 | 2009-12-15 16:46:48 -0800 | [diff] [blame] | 4398 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4399 | .can_merge		= 0, | 
|  | 4400 | .map			= generic_pipe_buf_map, | 
|  | 4401 | .unmap			= generic_pipe_buf_unmap, | 
|  | 4402 | .confirm		= generic_pipe_buf_confirm, | 
|  | 4403 | .release		= buffer_pipe_buf_release, | 
| Masami Hiramatsu | d55cb6c | 2012-08-09 21:31:10 +0900 | [diff] [blame] | 4404 | .steal			= generic_pipe_buf_steal, | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4405 | .get			= buffer_pipe_buf_get, | 
|  | 4406 | }; | 
|  | 4407 |  | 
|  | 4408 | /* | 
|  | 4409 | * Callback from splice_to_pipe(); releases any pages left at the | 
|  | 4410 | * end of the spd in case we errored out while filling the pipe. | 
|  | 4411 | */ | 
|  | 4412 | static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | 
|  | 4413 | { | 
|  | 4414 | struct buffer_ref *ref = | 
|  | 4415 | (struct buffer_ref *)spd->partial[i].private; | 
|  | 4416 |  | 
|  | 4417 | if (--ref->ref) | 
|  | 4418 | return; | 
|  | 4419 |  | 
|  | 4420 | ring_buffer_free_read_page(ref->buffer, ref->page); | 
|  | 4421 | kfree(ref); | 
|  | 4422 | spd->partial[i].private = 0; | 
|  | 4423 | } | 
|  | 4424 |  | 
|  | 4425 | static ssize_t | 
|  | 4426 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, | 
|  | 4427 | struct pipe_inode_info *pipe, size_t len, | 
|  | 4428 | unsigned int flags) | 
|  | 4429 | { | 
|  | 4430 | struct ftrace_buffer_info *info = file->private_data; | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4431 | struct partial_page partial_def[PIPE_DEF_BUFFERS]; | 
|  | 4432 | struct page *pages_def[PIPE_DEF_BUFFERS]; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4433 | struct splice_pipe_desc spd = { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4434 | .pages		= pages_def, | 
|  | 4435 | .partial	= partial_def, | 
| Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 4436 | .nr_pages_max	= PIPE_DEF_BUFFERS, | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4437 | .flags		= flags, | 
|  | 4438 | .ops		= &buffer_pipe_buf_ops, | 
|  | 4439 | .spd_release	= buffer_spd_release, | 
|  | 4440 | }; | 
|  | 4441 | struct buffer_ref *ref; | 
| Steven Rostedt | 93459c6 | 2009-04-29 00:23:13 -0400 | [diff] [blame] | 4442 | int entries, size, i; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4443 | size_t ret; | 
|  | 4444 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4445 | if (splice_grow_spd(pipe, &spd)) | 
|  | 4446 | return -ENOMEM; | 
|  | 4447 |  | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4448 | if (*ppos & (PAGE_SIZE - 1)) { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4449 | ret = -EINVAL; | 
|  | 4450 | goto out; | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4451 | } | 
|  | 4452 |  | 
|  | 4453 | if (len & (PAGE_SIZE - 1)) { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4454 | if (len < PAGE_SIZE) { | 
|  | 4455 | ret = -EINVAL; | 
|  | 4456 | goto out; | 
|  | 4457 | } | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4458 | len &= PAGE_MASK; | 
|  | 4459 | } | 
|  | 4460 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 4461 | trace_access_lock(info->cpu); | 
| Steven Rostedt | 93459c6 | 2009-04-29 00:23:13 -0400 | [diff] [blame] | 4462 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 
|  | 4463 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4464 | for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4465 | struct page *page; | 
|  | 4466 | int r; | 
|  | 4467 |  | 
|  | 4468 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | 
|  | 4469 | if (!ref) | 
|  | 4470 | break; | 
|  | 4471 |  | 
| Steven Rostedt | 7267fa6 | 2009-04-29 00:16:21 -0400 | [diff] [blame] | 4472 | ref->ref = 1; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4473 | ref->buffer = info->tr->buffer; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4474 | ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4475 | if (!ref->page) { | 
|  | 4476 | kfree(ref); | 
|  | 4477 | break; | 
|  | 4478 | } | 
|  | 4479 |  | 
|  | 4480 | r = ring_buffer_read_page(ref->buffer, &ref->page, | 
| Steven Rostedt | f2957f1 | 2009-04-29 00:26:30 -0400 | [diff] [blame] | 4481 | len, info->cpu, 1); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4482 | if (r < 0) { | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4483 | ring_buffer_free_read_page(ref->buffer, ref->page); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4484 | kfree(ref); | 
|  | 4485 | break; | 
|  | 4486 | } | 
|  | 4487 |  | 
|  | 4488 | /* | 
|  | 4489 | * Zero out any left-over data; this page is going to | 
|  | 4490 | * user land. | 
|  | 4491 | */ | 
|  | 4492 | size = ring_buffer_page_len(ref->page); | 
|  | 4493 | if (size < PAGE_SIZE) | 
|  | 4494 | memset(ref->page + size, 0, PAGE_SIZE - size); | 
|  | 4495 |  | 
|  | 4496 | page = virt_to_page(ref->page); | 
|  | 4497 |  | 
|  | 4498 | spd.pages[i] = page; | 
|  | 4499 | spd.partial[i].len = PAGE_SIZE; | 
|  | 4500 | spd.partial[i].offset = 0; | 
|  | 4501 | spd.partial[i].private = (unsigned long)ref; | 
|  | 4502 | spd.nr_pages++; | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4503 | *ppos += PAGE_SIZE; | 
| Steven Rostedt | 93459c6 | 2009-04-29 00:23:13 -0400 | [diff] [blame] | 4504 |  | 
|  | 4505 | entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4506 | } | 
|  | 4507 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 4508 | trace_access_unlock(info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4509 | spd.nr_pages = i; | 
|  | 4510 |  | 
|  | 4511 | /* did we read anything? */ | 
|  | 4512 | if (!spd.nr_pages) { | 
|  | 4513 | if (flags & SPLICE_F_NONBLOCK) | 
|  | 4514 | ret = -EAGAIN; | 
|  | 4515 | else | 
|  | 4516 | ret = 0; | 
|  | 4517 | /* TODO: block */ | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4518 | goto out; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4519 | } | 
|  | 4520 |  | 
|  | 4521 | ret = splice_to_pipe(pipe, &spd); | 
| Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 4522 | splice_shrink_spd(&spd); | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4523 | out: | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4524 | return ret; | 
|  | 4525 | } | 
|  | 4526 |  | 
|  | 4527 | static const struct file_operations tracing_buffers_fops = { | 
|  | 4528 | .open		= tracing_buffers_open, | 
|  | 4529 | .read		= tracing_buffers_read, | 
|  | 4530 | .release	= tracing_buffers_release, | 
|  | 4531 | .splice_read	= tracing_buffers_splice_read, | 
|  | 4532 | .llseek		= no_llseek, | 
|  | 4533 | }; | 
|  | 4534 |  | 
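/*
 * A hypothetical userspace sketch (not part of this file, added for
 * illustration): the splice_read handler above hands whole ring-buffer
 * pages to a pipe without copying, so raw per-cpu data can be captured
 * by splicing trace_pipe_raw through a pipe into an output file. The
 * debugfs mount point, CPU number and batch size are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	ssize_t n;
	int in = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);
	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}

	/* Move one batch of pages: trace_pipe_raw -> pipe -> regular file. */
	n = splice(in, NULL, pipefd[1], NULL, 16 * 4096, SPLICE_F_NONBLOCK);
	if (n > 0)
		n = splice(pipefd[0], NULL, out, NULL, n, 0);
	if (n < 0)
		perror("splice");

	close(in);
	close(out);
	return 0;
}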
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4535 | static ssize_t | 
|  | 4536 | tracing_stats_read(struct file *filp, char __user *ubuf, | 
|  | 4537 | size_t count, loff_t *ppos) | 
|  | 4538 | { | 
|  | 4539 | unsigned long cpu = (unsigned long)filp->private_data; | 
|  | 4540 | struct trace_array *tr = &global_trace; | 
|  | 4541 | struct trace_seq *s; | 
|  | 4542 | unsigned long cnt; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4543 | unsigned long long t; | 
|  | 4544 | unsigned long usec_rem; | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4545 |  | 
| Li Zefan | e4f2d10 | 2009-06-15 10:57:28 +0800 | [diff] [blame] | 4546 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4547 | if (!s) | 
| Roel Kluin | a646365 | 2009-11-11 22:26:35 +0100 | [diff] [blame] | 4548 | return -ENOMEM; | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4549 |  | 
|  | 4550 | trace_seq_init(s); | 
|  | 4551 |  | 
|  | 4552 | cnt = ring_buffer_entries_cpu(tr->buffer, cpu); | 
|  | 4553 | trace_seq_printf(s, "entries: %ld\n", cnt); | 
|  | 4554 |  | 
|  | 4555 | cnt = ring_buffer_overrun_cpu(tr->buffer, cpu); | 
|  | 4556 | trace_seq_printf(s, "overrun: %ld\n", cnt); | 
|  | 4557 |  | 
|  | 4558 | cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); | 
|  | 4559 | trace_seq_printf(s, "commit overrun: %ld\n", cnt); | 
|  | 4560 |  | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4561 | cnt = ring_buffer_bytes_cpu(tr->buffer, cpu); | 
|  | 4562 | trace_seq_printf(s, "bytes: %ld\n", cnt); | 
|  | 4563 |  | 
| Yoshihiro YUNOMAE | 11043d8 | 2012-11-13 12:18:23 -0800 | [diff] [blame] | 4564 | if (trace_clocks[trace_clock_id].in_ns) { | 
|  | 4565 | /* local or global for trace_clock */ | 
|  | 4566 | t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu)); | 
|  | 4567 | usec_rem = do_div(t, USEC_PER_SEC); | 
|  | 4568 | trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", | 
|  | 4569 | t, usec_rem); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4570 |  | 
| Yoshihiro YUNOMAE | 11043d8 | 2012-11-13 12:18:23 -0800 | [diff] [blame] | 4571 | t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu)); | 
|  | 4572 | usec_rem = do_div(t, USEC_PER_SEC); | 
|  | 4573 | trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); | 
|  | 4574 | } else { | 
|  | 4575 | /* counter or tsc mode for trace_clock */ | 
|  | 4576 | trace_seq_printf(s, "oldest event ts: %llu\n", | 
|  | 4577 | ring_buffer_oldest_event_ts(tr->buffer, cpu)); | 
|  | 4578 |  | 
|  | 4579 | trace_seq_printf(s, "now ts: %llu\n", | 
|  | 4580 | ring_buffer_time_stamp(tr->buffer, cpu)); | 
|  | 4581 | } | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4582 |  | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 4583 | cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu); | 
|  | 4584 | trace_seq_printf(s, "dropped events: %ld\n", cnt); | 
|  | 4585 |  | 
| Steven Rostedt (Red Hat) | ad96470 | 2013-01-29 17:45:49 -0500 | [diff] [blame] | 4586 | cnt = ring_buffer_read_events_cpu(tr->buffer, cpu); | 
|  | 4587 | trace_seq_printf(s, "read events: %ld\n", cnt); | 
|  | 4588 |  | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4589 | count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | 
|  | 4590 |  | 
|  | 4591 | kfree(s); | 
|  | 4592 |  | 
|  | 4593 | return count; | 
|  | 4594 | } | 
|  | 4595 |  | 
|  | 4596 | static const struct file_operations tracing_stats_fops = { | 
|  | 4597 | .open		= tracing_open_generic, | 
|  | 4598 | .read		= tracing_stats_read, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4599 | .llseek		= generic_file_llseek, | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4600 | }; | 
|  | 4601 |  | 
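/*
 * A hypothetical userspace sketch (not part of this file, added for
 * illustration): the per-cpu "stats" file produced by tracing_stats_read()
 * above is plain text, one "name: value" pair per line. The debugfs mount
 * point and CPU number are assumptions.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/tracing/per_cpu/cpu0/stats", "r");

	if (!f) {
		perror("stats");
		return 1;
	}

	/* e.g. "entries: 42", "overrun: 0", "oldest event ts: 123.000456" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}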
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4602 | #ifdef CONFIG_DYNAMIC_FTRACE | 
|  | 4603 |  | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4604 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4605 | { | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4606 | return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4607 | } | 
|  | 4608 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4609 | static ssize_t | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4610 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4611 | size_t cnt, loff_t *ppos) | 
|  | 4612 | { | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 4613 | static char ftrace_dyn_info_buffer[1024]; | 
|  | 4614 | static DEFINE_MUTEX(dyn_info_mutex); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4615 | unsigned long *p = filp->private_data; | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4616 | char *buf = ftrace_dyn_info_buffer; | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 4617 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4618 | int r; | 
|  | 4619 |  | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4620 | mutex_lock(&dyn_info_mutex); | 
|  | 4621 | r = sprintf(buf, "%ld ", *p); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4622 |  | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 4623 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4624 | buf[r++] = '\n'; | 
|  | 4625 |  | 
|  | 4626 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
|  | 4627 |  | 
|  | 4628 | mutex_unlock(&dyn_info_mutex); | 
|  | 4629 |  | 
|  | 4630 | return r; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4631 | } | 
|  | 4632 |  | 
| Steven Rostedt | 5e2336a | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 4633 | static const struct file_operations tracing_dyn_info_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4634 | .open		= tracing_open_generic, | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4635 | .read		= tracing_read_dyn_info, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4636 | .llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4637 | }; | 
|  | 4638 | #endif | 
|  | 4639 |  | 
|  | 4640 | static struct dentry *d_tracer; | 
|  | 4641 |  | 
|  | 4642 | struct dentry *tracing_init_dentry(void) | 
|  | 4643 | { | 
|  | 4644 | static int once; | 
|  | 4645 |  | 
|  | 4646 | if (d_tracer) | 
|  | 4647 | return d_tracer; | 
|  | 4648 |  | 
| Frederic Weisbecker | 3e1f60b | 2009-03-22 23:10:45 +0100 | [diff] [blame] | 4649 | if (!debugfs_initialized()) | 
|  | 4650 | return NULL; | 
|  | 4651 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4652 | d_tracer = debugfs_create_dir("tracing", NULL); | 
|  | 4653 |  | 
|  | 4654 | if (!d_tracer && !once) { | 
|  | 4655 | once = 1; | 
|  | 4656 | pr_warning("Could not create debugfs directory 'tracing'\n"); | 
|  | 4657 | return NULL; | 
|  | 4658 | } | 
|  | 4659 |  | 
|  | 4660 | return d_tracer; | 
|  | 4661 | } | 
|  | 4662 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4663 | static struct dentry *d_percpu; | 
|  | 4664 |  | 
| Josh Triplett | b736f48 | 2012-11-18 21:27:45 -0800 | [diff] [blame] | 4665 | static struct dentry *tracing_dentry_percpu(void) | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4666 | { | 
|  | 4667 | static int once; | 
|  | 4668 | struct dentry *d_tracer; | 
|  | 4669 |  | 
|  | 4670 | if (d_percpu) | 
|  | 4671 | return d_percpu; | 
|  | 4672 |  | 
|  | 4673 | d_tracer = tracing_init_dentry(); | 
|  | 4674 |  | 
|  | 4675 | if (!d_tracer) | 
|  | 4676 | return NULL; | 
|  | 4677 |  | 
|  | 4678 | d_percpu = debugfs_create_dir("per_cpu", d_tracer); | 
|  | 4679 |  | 
|  | 4680 | if (!d_percpu && !once) { | 
|  | 4681 | once = 1; | 
|  | 4682 | pr_warning("Could not create debugfs directory 'per_cpu'\n"); | 
|  | 4683 | return NULL; | 
|  | 4684 | } | 
|  | 4685 |  | 
|  | 4686 | return d_percpu; | 
|  | 4687 | } | 
|  | 4688 |  | 
|  | 4689 | static void tracing_init_debugfs_percpu(long cpu) | 
|  | 4690 | { | 
|  | 4691 | struct dentry *d_percpu = tracing_dentry_percpu(); | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4692 | struct dentry *d_cpu; | 
| Steven Rostedt | dd49a38 | 2010-10-20 21:51:26 -0400 | [diff] [blame] | 4693 | char cpu_dir[30]; /* 30 characters should be more than enough */ | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4694 |  | 
| Namhyung Kim | 0a3d7ce | 2012-04-23 10:11:57 +0900 | [diff] [blame] | 4695 | if (!d_percpu) | 
|  | 4696 | return; | 
|  | 4697 |  | 
| Steven Rostedt | dd49a38 | 2010-10-20 21:51:26 -0400 | [diff] [blame] | 4698 | snprintf(cpu_dir, 30, "cpu%ld", cpu); | 
| Frederic Weisbecker | 8656e7a | 2009-02-26 00:41:38 +0100 | [diff] [blame] | 4699 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); | 
|  | 4700 | if (!d_cpu) { | 
|  | 4701 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); | 
|  | 4702 | return; | 
|  | 4703 | } | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4704 |  | 
| Frederic Weisbecker | 8656e7a | 2009-02-26 00:41:38 +0100 | [diff] [blame] | 4705 | /* per cpu trace_pipe */ | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4706 | trace_create_file("trace_pipe", 0444, d_cpu, | 
|  | 4707 | (void *) cpu, &tracing_pipe_fops); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4708 |  | 
|  | 4709 | /* per cpu trace */ | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4710 | trace_create_file("trace", 0644, d_cpu, | 
|  | 4711 | (void *) cpu, &tracing_fops); | 
| Steven Rostedt | 7f96f93 | 2009-03-13 00:37:42 -0400 | [diff] [blame] | 4712 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4713 | trace_create_file("trace_pipe_raw", 0444, d_cpu, | 
|  | 4714 | (void *) cpu, &tracing_buffers_fops); | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4715 |  | 
|  | 4716 | trace_create_file("stats", 0444, d_cpu, | 
|  | 4717 | (void *) cpu, &tracing_stats_fops); | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4718 |  | 
|  | 4719 | trace_create_file("buffer_size_kb", 0444, d_cpu, | 
|  | 4720 | (void *) cpu, &tracing_entries_fops); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4721 | } | 
|  | 4722 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 4723 | #ifdef CONFIG_FTRACE_SELFTEST | 
|  | 4724 | /* Let selftest have access to static functions in this file */ | 
|  | 4725 | #include "trace_selftest.c" | 
|  | 4726 | #endif | 
|  | 4727 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4728 | struct trace_option_dentry { | 
|  | 4729 | struct tracer_opt		*opt; | 
|  | 4730 | struct tracer_flags		*flags; | 
|  | 4731 | struct dentry			*entry; | 
|  | 4732 | }; | 
|  | 4733 |  | 
|  | 4734 | static ssize_t | 
|  | 4735 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, | 
|  | 4736 | loff_t *ppos) | 
|  | 4737 | { | 
|  | 4738 | struct trace_option_dentry *topt = filp->private_data; | 
|  | 4739 | char *buf; | 
|  | 4740 |  | 
|  | 4741 | if (topt->flags->val & topt->opt->bit) | 
|  | 4742 | buf = "1\n"; | 
|  | 4743 | else | 
|  | 4744 | buf = "0\n"; | 
|  | 4745 |  | 
|  | 4746 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 
|  | 4747 | } | 
|  | 4748 |  | 
|  | 4749 | static ssize_t | 
|  | 4750 | trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | 
|  | 4751 | loff_t *ppos) | 
|  | 4752 | { | 
|  | 4753 | struct trace_option_dentry *topt = filp->private_data; | 
|  | 4754 | unsigned long val; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4755 | int ret; | 
|  | 4756 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 4757 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
|  | 4758 | if (ret) | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4759 | return ret; | 
|  | 4760 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 4761 | if (val != 0 && val != 1) | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4762 | return -EINVAL; | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 4763 |  | 
|  | 4764 | if (!!(topt->flags->val & topt->opt->bit) != val) { | 
|  | 4765 | mutex_lock(&trace_types_lock); | 
|  | 4766 | ret = __set_tracer_option(current_trace, topt->flags, | 
| Steven Rostedt | c757bea | 2009-12-21 22:35:16 -0500 | [diff] [blame] | 4767 | topt->opt, !val); | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 4768 | mutex_unlock(&trace_types_lock); | 
|  | 4769 | if (ret) | 
|  | 4770 | return ret; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4771 | } | 
|  | 4772 |  | 
|  | 4773 | *ppos += cnt; | 
|  | 4774 |  | 
|  | 4775 | return cnt; | 
|  | 4776 | } | 
|  | 4777 |  | 
|  | 4778 |  | 
|  | 4779 | static const struct file_operations trace_options_fops = { | 
|  | 4780 | .open = tracing_open_generic, | 
|  | 4781 | .read = trace_options_read, | 
|  | 4782 | .write = trace_options_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4783 | .llseek	= generic_file_llseek, | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4784 | }; | 
|  | 4785 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4786 | static ssize_t | 
|  | 4787 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, | 
|  | 4788 | loff_t *ppos) | 
|  | 4789 | { | 
|  | 4790 | long index = (long)filp->private_data; | 
|  | 4791 | char *buf; | 
|  | 4792 |  | 
|  | 4793 | if (trace_flags & (1 << index)) | 
|  | 4794 | buf = "1\n"; | 
|  | 4795 | else | 
|  | 4796 | buf = "0\n"; | 
|  | 4797 |  | 
|  | 4798 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 
|  | 4799 | } | 
|  | 4800 |  | 
|  | 4801 | static ssize_t | 
|  | 4802 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | 
|  | 4803 | loff_t *ppos) | 
|  | 4804 | { | 
|  | 4805 | long index = (long)filp->private_data; | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4806 | unsigned long val; | 
|  | 4807 | int ret; | 
|  | 4808 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 4809 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
|  | 4810 | if (ret) | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4811 | return ret; | 
|  | 4812 |  | 
| Zhaolei | f2d84b6 | 2009-08-07 18:55:48 +0800 | [diff] [blame] | 4813 | if (val != 0 && val != 1) | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4814 | return -EINVAL; | 
| Steven Rostedt (Red Hat) | 69d34da | 2013-03-14 13:50:56 -0400 | [diff] [blame] | 4815 |  | 
|  | 4816 | mutex_lock(&trace_types_lock); | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 4817 | ret = set_tracer_flag(1 << index, val); | 
| Steven Rostedt (Red Hat) | 69d34da | 2013-03-14 13:50:56 -0400 | [diff] [blame] | 4818 | mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4819 |  | 
| Steven Rostedt (Red Hat) | 613f04a | 2013-03-14 15:03:53 -0400 | [diff] [blame] | 4820 | if (ret < 0) | 
|  | 4821 | return ret; | 
|  | 4822 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4823 | *ppos += cnt; | 
|  | 4824 |  | 
|  | 4825 | return cnt; | 
|  | 4826 | } | 
|  | 4827 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4828 | static const struct file_operations trace_options_core_fops = { | 
|  | 4829 | .open = tracing_open_generic, | 
|  | 4830 | .read = trace_options_core_read, | 
|  | 4831 | .write = trace_options_core_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4832 | .llseek = generic_file_llseek, | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4833 | }; | 
|  | 4834 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4835 | struct dentry *trace_create_file(const char *name, | 
| Al Viro | f4ae40a | 2011-07-24 04:33:43 -0400 | [diff] [blame] | 4836 | umode_t mode, | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4837 | struct dentry *parent, | 
|  | 4838 | void *data, | 
|  | 4839 | const struct file_operations *fops) | 
|  | 4840 | { | 
|  | 4841 | struct dentry *ret; | 
|  | 4842 |  | 
|  | 4843 | ret = debugfs_create_file(name, mode, parent, data, fops); | 
|  | 4844 | if (!ret) | 
|  | 4845 | pr_warning("Could not create debugfs '%s' entry\n", name); | 
|  | 4846 |  | 
|  | 4847 | return ret; | 
|  | 4848 | } | 
|  | 4849 |  | 
|  | 4850 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4851 | static struct dentry *trace_options_init_dentry(void) | 
|  | 4852 | { | 
|  | 4853 | struct dentry *d_tracer; | 
|  | 4854 | static struct dentry *t_options; | 
|  | 4855 |  | 
|  | 4856 | if (t_options) | 
|  | 4857 | return t_options; | 
|  | 4858 |  | 
|  | 4859 | d_tracer = tracing_init_dentry(); | 
|  | 4860 | if (!d_tracer) | 
|  | 4861 | return NULL; | 
|  | 4862 |  | 
|  | 4863 | t_options = debugfs_create_dir("options", d_tracer); | 
|  | 4864 | if (!t_options) { | 
|  | 4865 | pr_warning("Could not create debugfs directory 'options'\n"); | 
|  | 4866 | return NULL; | 
|  | 4867 | } | 
|  | 4868 |  | 
|  | 4869 | return t_options; | 
|  | 4870 | } | 
|  | 4871 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4872 | static void | 
|  | 4873 | create_trace_option_file(struct trace_option_dentry *topt, | 
|  | 4874 | struct tracer_flags *flags, | 
|  | 4875 | struct tracer_opt *opt) | 
|  | 4876 | { | 
|  | 4877 | struct dentry *t_options; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4878 |  | 
|  | 4879 | t_options = trace_options_init_dentry(); | 
|  | 4880 | if (!t_options) | 
|  | 4881 | return; | 
|  | 4882 |  | 
|  | 4883 | topt->flags = flags; | 
|  | 4884 | topt->opt = opt; | 
|  | 4885 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4886 | topt->entry = trace_create_file(opt->name, 0644, t_options, topt, | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4887 | &trace_options_fops); | 
|  | 4888 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4889 | } | 
|  | 4890 |  | 
|  | 4891 | static struct trace_option_dentry * | 
|  | 4892 | create_trace_option_files(struct tracer *tracer) | 
|  | 4893 | { | 
|  | 4894 | struct trace_option_dentry *topts; | 
|  | 4895 | struct tracer_flags *flags; | 
|  | 4896 | struct tracer_opt *opts; | 
|  | 4897 | int cnt; | 
|  | 4898 |  | 
|  | 4899 | if (!tracer) | 
|  | 4900 | return NULL; | 
|  | 4901 |  | 
|  | 4902 | flags = tracer->flags; | 
|  | 4903 |  | 
|  | 4904 | if (!flags || !flags->opts) | 
|  | 4905 | return NULL; | 
|  | 4906 |  | 
|  | 4907 | opts = flags->opts; | 
|  | 4908 |  | 
|  | 4909 | for (cnt = 0; opts[cnt].name; cnt++) | 
|  | 4910 | ; | 
|  | 4911 |  | 
| Steven Rostedt | 0cfe824 | 2009-02-27 10:51:10 -0500 | [diff] [blame] | 4912 | topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4913 | if (!topts) | 
|  | 4914 | return NULL; | 
|  | 4915 |  | 
|  | 4916 | for (cnt = 0; opts[cnt].name; cnt++) | 
|  | 4917 | create_trace_option_file(&topts[cnt], flags, | 
|  | 4918 | &opts[cnt]); | 
|  | 4919 |  | 
|  | 4920 | return topts; | 
|  | 4921 | } | 
|  | 4922 |  | 
|  | 4923 | static void | 
|  | 4924 | destroy_trace_option_files(struct trace_option_dentry *topts) | 
|  | 4925 | { | 
|  | 4926 | int cnt; | 
|  | 4927 |  | 
|  | 4928 | if (!topts) | 
|  | 4929 | return; | 
|  | 4930 |  | 
|  | 4931 | for (cnt = 0; topts[cnt].opt; cnt++) { | 
|  | 4932 | if (topts[cnt].entry) | 
|  | 4933 | debugfs_remove(topts[cnt].entry); | 
|  | 4934 | } | 
|  | 4935 |  | 
|  | 4936 | kfree(topts); | 
|  | 4937 | } | 
|  | 4938 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4939 | static struct dentry * | 
|  | 4940 | create_trace_option_core_file(const char *option, long index) | 
|  | 4941 | { | 
|  | 4942 | struct dentry *t_options; | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4943 |  | 
|  | 4944 | t_options = trace_options_init_dentry(); | 
|  | 4945 | if (!t_options) | 
|  | 4946 | return NULL; | 
|  | 4947 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4948 | return trace_create_file(option, 0644, t_options, (void *)index, | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4949 | &trace_options_core_fops); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4950 | } | 
|  | 4951 |  | 
|  | 4952 | static __init void create_trace_options_dir(void) | 
|  | 4953 | { | 
|  | 4954 | struct dentry *t_options; | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4955 | int i; | 
|  | 4956 |  | 
|  | 4957 | t_options = trace_options_init_dentry(); | 
|  | 4958 | if (!t_options) | 
|  | 4959 | return; | 
|  | 4960 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4961 | for (i = 0; trace_options[i]; i++) | 
|  | 4962 | create_trace_option_core_file(trace_options[i], i); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4963 | } | 
|  | 4964 |  | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 4965 | static ssize_t | 
|  | 4966 | rb_simple_read(struct file *filp, char __user *ubuf, | 
|  | 4967 | size_t cnt, loff_t *ppos) | 
|  | 4968 | { | 
| Steven Rostedt | 348f0fc | 2012-04-16 15:41:28 -0400 | [diff] [blame] | 4969 | struct trace_array *tr = filp->private_data; | 
|  | 4970 | struct ring_buffer *buffer = tr->buffer; | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 4971 | char buf[64]; | 
|  | 4972 | int r; | 
|  | 4973 |  | 
|  | 4974 | if (buffer) | 
|  | 4975 | r = ring_buffer_record_is_on(buffer); | 
|  | 4976 | else | 
|  | 4977 | r = 0; | 
|  | 4978 |  | 
|  | 4979 | r = sprintf(buf, "%d\n", r); | 
|  | 4980 |  | 
|  | 4981 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
|  | 4982 | } | 
|  | 4983 |  | 
|  | 4984 | static ssize_t | 
|  | 4985 | rb_simple_write(struct file *filp, const char __user *ubuf, | 
|  | 4986 | size_t cnt, loff_t *ppos) | 
|  | 4987 | { | 
| Steven Rostedt | 348f0fc | 2012-04-16 15:41:28 -0400 | [diff] [blame] | 4988 | struct trace_array *tr = filp->private_data; | 
|  | 4989 | struct ring_buffer *buffer = tr->buffer; | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 4990 | unsigned long val; | 
|  | 4991 | int ret; | 
|  | 4992 |  | 
|  | 4993 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
|  | 4994 | if (ret) | 
|  | 4995 | return ret; | 
|  | 4996 |  | 
|  | 4997 | if (buffer) { | 
| Steven Rostedt | 2df8f8a | 2013-01-11 16:14:10 -0500 | [diff] [blame] | 4998 | mutex_lock(&trace_types_lock); | 
|  | 4999 | if (val) { | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 5000 | ring_buffer_record_on(buffer); | 
| Steven Rostedt | 2df8f8a | 2013-01-11 16:14:10 -0500 | [diff] [blame] | 5001 | if (current_trace->start) | 
|  | 5002 | current_trace->start(tr); | 
|  | 5003 | } else { | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 5004 | ring_buffer_record_off(buffer); | 
| Steven Rostedt | 2df8f8a | 2013-01-11 16:14:10 -0500 | [diff] [blame] | 5005 | if (current_trace->stop) | 
|  | 5006 | current_trace->stop(tr); | 
|  | 5007 | } | 
|  | 5008 | mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 5009 | } | 
|  | 5010 |  | 
|  | 5011 | (*ppos)++; | 
|  | 5012 |  | 
|  | 5013 | return cnt; | 
|  | 5014 | } | 
|  | 5015 |  | 
|  | 5016 | static const struct file_operations rb_simple_fops = { | 
|  | 5017 | .open		= tracing_open_generic, | 
|  | 5018 | .read		= rb_simple_read, | 
|  | 5019 | .write		= rb_simple_write, | 
|  | 5020 | .llseek		= default_llseek, | 
|  | 5021 | }; | 
|  | 5022 |  | 
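/*
 * A hypothetical userspace sketch (not part of this file, added for
 * illustration): writing '0' or '1' to the "tracing_on" file registered
 * below ends up in rb_simple_write() above and turns ring-buffer
 * recording off or on. The debugfs mount point is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_tracing_on(int on)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, on ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Stop recording, e.g. right after the condition of interest hit. */
	if (set_tracing_on(0) < 0)
		perror("tracing_on");
	return 0;
}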
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 5023 | static __init int tracer_init_debugfs(void) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5024 | { | 
|  | 5025 | struct dentry *d_tracer; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 5026 | int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5027 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 5028 | trace_access_lock_init(); | 
|  | 5029 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5030 | d_tracer = tracing_init_dentry(); | 
|  | 5031 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5032 | trace_create_file("trace_options", 0644, d_tracer, | 
|  | 5033 | NULL, &tracing_iter_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5034 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5035 | trace_create_file("tracing_cpumask", 0644, d_tracer, | 
|  | 5036 | NULL, &tracing_cpumask_fops); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 5037 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5038 | trace_create_file("trace", 0644, d_tracer, | 
|  | 5039 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 5040 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5041 | trace_create_file("available_tracers", 0444, d_tracer, | 
|  | 5042 | &global_trace, &show_traces_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5043 |  | 
| Li Zefan | 339ae5d | 2009-04-17 10:34:30 +0800 | [diff] [blame] | 5044 | trace_create_file("current_tracer", 0644, d_tracer, | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5045 | &global_trace, &set_tracer_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5046 |  | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 5047 | #ifdef CONFIG_TRACER_MAX_TRACE | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5048 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 
|  | 5049 | &tracing_max_latency, &tracing_max_lat_fops); | 
| Tim Bird | 0e95017 | 2010-02-25 15:36:43 -0800 | [diff] [blame] | 5050 | #endif | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5051 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5052 | trace_create_file("tracing_thresh", 0644, d_tracer, | 
|  | 5053 | &tracing_thresh, &tracing_max_lat_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5054 |  | 
| Li Zefan | 339ae5d | 2009-04-17 10:34:30 +0800 | [diff] [blame] | 5055 | trace_create_file("README", 0444, d_tracer, | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5056 | NULL, &tracing_readme_fops); | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 5057 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5058 | trace_create_file("trace_pipe", 0444, d_tracer, | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 5059 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5060 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5061 | trace_create_file("buffer_size_kb", 0644, d_tracer, | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 5062 | (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops); | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 5063 |  | 
| Vaibhav Nagarnaik | f81ab07 | 2011-08-16 14:46:15 -0700 | [diff] [blame] | 5064 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, | 
|  | 5065 | &global_trace, &tracing_total_entries_fops); | 
|  | 5066 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 5067 | trace_create_file("free_buffer", 0644, d_tracer, | 
|  | 5068 | &global_trace, &tracing_free_buffer_fops); | 
|  | 5069 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5070 | trace_create_file("trace_marker", 0220, d_tracer, | 
|  | 5071 | NULL, &tracing_mark_fops); | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 5072 |  | 
| Avadh Patel | 69abe6a | 2009-04-10 16:04:48 -0400 | [diff] [blame] | 5073 | trace_create_file("saved_cmdlines", 0444, d_tracer, | 
|  | 5074 | NULL, &tracing_saved_cmdlines_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5075 |  | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 5076 | trace_create_file("trace_clock", 0644, d_tracer, NULL, | 
|  | 5077 | &trace_clock_fops); | 
|  | 5078 |  | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 5079 | trace_create_file("tracing_on", 0644, d_tracer, | 
| Steven Rostedt | 348f0fc | 2012-04-16 15:41:28 -0400 | [diff] [blame] | 5080 | &global_trace, &rb_simple_fops); | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 5081 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5082 | #ifdef CONFIG_DYNAMIC_FTRACE | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5083 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 
|  | 5084 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5085 | #endif | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 5086 |  | 
| Hiraku Toyooka | debdd57 | 2012-12-26 11:53:00 +0900 | [diff] [blame] | 5087 | #ifdef CONFIG_TRACER_SNAPSHOT | 
|  | 5088 | trace_create_file("snapshot", 0644, d_tracer, | 
|  | 5089 | (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops); | 
|  | 5090 | #endif | 
|  | 5091 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 5092 | create_trace_options_dir(); | 
|  | 5093 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 5094 | for_each_tracing_cpu(cpu) | 
|  | 5095 | tracing_init_debugfs_percpu(cpu); | 
|  | 5096 |  | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 5097 | return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5098 | } | 
|  | 5099 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5100 | static int trace_panic_handler(struct notifier_block *this, | 
|  | 5101 | unsigned long event, void *unused) | 
|  | 5102 | { | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 5103 | if (ftrace_dump_on_oops) | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 5104 | ftrace_dump(ftrace_dump_on_oops); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5105 | return NOTIFY_OK; | 
|  | 5106 | } | 
|  | 5107 |  | 
|  | 5108 | static struct notifier_block trace_panic_notifier = { | 
|  | 5109 | .notifier_call  = trace_panic_handler, | 
|  | 5110 | .next           = NULL, | 
|  | 5111 | .priority       = 150   /* priority: INT_MAX >= x >= 0 */ | 
|  | 5112 | }; | 
|  | 5113 |  | 
|  | 5114 | static int trace_die_handler(struct notifier_block *self, | 
|  | 5115 | unsigned long val, | 
|  | 5116 | void *data) | 
|  | 5117 | { | 
|  | 5118 | switch (val) { | 
|  | 5119 | case DIE_OOPS: | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 5120 | if (ftrace_dump_on_oops) | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 5121 | ftrace_dump(ftrace_dump_on_oops); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5122 | break; | 
|  | 5123 | default: | 
|  | 5124 | break; | 
|  | 5125 | } | 
|  | 5126 | return NOTIFY_OK; | 
|  | 5127 | } | 
|  | 5128 |  | 
|  | 5129 | static struct notifier_block trace_die_notifier = { | 
|  | 5130 | .notifier_call = trace_die_handler, | 
|  | 5131 | .priority = 200 | 
|  | 5132 | }; | 
|  | 5133 |  | 
|  | 5134 | /* | 
|  | 5135 | * printk is set to a max of 1024; we really don't need it that big. | 
|  | 5136 | * Nothing should be printing 1000 characters anyway. | 
|  | 5137 | */ | 
|  | 5138 | #define TRACE_MAX_PRINT		1000 | 
|  | 5139 |  | 
|  | 5140 | /* | 
|  | 5141 | * Define KERN_TRACE here so that we have one place to modify | 
|  | 5142 | * it if we decide to change what log level the ftrace dump | 
|  | 5143 | * should be at. | 
|  | 5144 | */ | 
| Steven Rostedt | 428aee1 | 2009-01-14 12:24:42 -0500 | [diff] [blame] | 5145 | #define KERN_TRACE		KERN_EMERG | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5146 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 5147 | void | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5148 | trace_printk_seq(struct trace_seq *s) | 
|  | 5149 | { | 
|  | 5150 | /* Probably should print a warning here. */ | 
|  | 5151 | if (s->len >= 1000) | 
|  | 5152 | s->len = 1000; | 
|  | 5153 |  | 
|  | 5154 | /* should be zero-terminated, but we are paranoid. */ | 
|  | 5155 | s->buffer[s->len] = 0; | 
|  | 5156 |  | 
|  | 5157 | printk(KERN_TRACE "%s", s->buffer); | 
|  | 5158 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 5159 | trace_seq_init(s); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5160 | } | 
|  | 5161 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 5162 | void trace_init_global_iter(struct trace_iterator *iter) | 
|  | 5163 | { | 
|  | 5164 | iter->tr = &global_trace; | 
|  | 5165 | iter->trace = current_trace; | 
|  | 5166 | iter->cpu_file = TRACE_PIPE_ALL_CPU; | 
|  | 5167 | } | 
|  | 5168 |  | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 5169 | static void | 
|  | 5170 | __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5171 | { | 
| Thomas Gleixner | 445c895 | 2009-12-02 19:49:50 +0100 | [diff] [blame] | 5172 | static arch_spinlock_t ftrace_dump_lock = | 
| Thomas Gleixner | edc35bd | 2009-12-03 12:38:57 +0100 | [diff] [blame] | 5173 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5174 | /* use static because iter can be a bit big for the stack */ | 
|  | 5175 | static struct trace_iterator iter; | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5176 | unsigned int old_userobj; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5177 | static int dump_ran; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 5178 | unsigned long flags; | 
|  | 5179 | int cnt = 0, cpu; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5180 |  | 
|  | 5181 | /* only one dump */ | 
| Steven Rostedt | cd891ae | 2009-04-28 11:39:34 -0400 | [diff] [blame] | 5182 | local_irq_save(flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 5183 | arch_spin_lock(&ftrace_dump_lock); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5184 | if (dump_ran) | 
|  | 5185 | goto out; | 
|  | 5186 |  | 
|  | 5187 | dump_ran = 1; | 
|  | 5188 |  | 
| Steven Rostedt | 0ee6b6c | 2009-01-14 14:50:19 -0500 | [diff] [blame] | 5189 | tracing_off(); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5190 |  | 
| Steven Rostedt | e0a413f | 2011-09-29 21:26:16 -0400 | [diff] [blame] | 5191 | /* Did function tracer already get disabled? */ | 
|  | 5192 | if (ftrace_is_dead()) { | 
|  | 5193 | printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); | 
|  | 5194 | printk("#          MAY BE MISSING FUNCTION EVENTS\n"); | 
|  | 5195 | } | 
|  | 5196 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5197 | if (disable_tracing) | 
|  | 5198 | ftrace_kill(); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5199 |  | 
| Jovi Zhang | 38dbe0b | 2013-01-25 18:03:07 +0800 | [diff] [blame] | 5200 | /* Simulate the iterator */ | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 5201 | trace_init_global_iter(&iter); | 
|  | 5202 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 5203 | for_each_tracing_cpu(cpu) { | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 5204 | atomic_inc(&iter.tr->data[cpu]->disabled); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 5205 | } | 
|  | 5206 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5207 | old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; | 
|  | 5208 |  | 
| Török Edwin | b54d3de | 2008-11-22 13:28:48 +0200 | [diff] [blame] | 5209 | /* don't look at user memory in panic mode */ | 
|  | 5210 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 
|  | 5211 |  | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 5212 | switch (oops_dump_mode) { | 
|  | 5213 | case DUMP_ALL: | 
|  | 5214 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | 
|  | 5215 | break; | 
|  | 5216 | case DUMP_ORIG: | 
|  | 5217 | iter.cpu_file = raw_smp_processor_id(); | 
|  | 5218 | break; | 
|  | 5219 | case DUMP_NONE: | 
|  | 5220 | goto out_enable; | 
|  | 5221 | default: | 
|  | 5222 | printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); | 
|  | 5223 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | 
|  | 5224 | } | 
|  | 5225 |  | 
|  | 5226 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5227 |  | 
|  | 5228 | /* | 
|  | 5229 | * We need to stop all tracing on all CPUs to read | 
|  | 5230 | * the next buffer. This is a bit expensive, but is | 
|  | 5231 | * not done often. We read everything we can, | 
|  | 5232 | * and then release the locks again. | 
|  | 5233 | */ | 
|  | 5234 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5235 | while (!trace_empty(&iter)) { | 
|  | 5236 |  | 
|  | 5237 | if (!cnt) | 
|  | 5238 | printk(KERN_TRACE "---------------------------------\n"); | 
|  | 5239 |  | 
|  | 5240 | cnt++; | 
|  | 5241 |  | 
|  | 5242 | /* reset all but tr, trace, and overruns */ | 
|  | 5243 | memset(&iter.seq, 0, | 
|  | 5244 | sizeof(struct trace_iterator) - | 
|  | 5245 | offsetof(struct trace_iterator, seq)); | 
|  | 5246 | iter.iter_flags |= TRACE_FILE_LAT_FMT; | 
|  | 5247 | iter.pos = -1; | 
|  | 5248 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 5249 | if (trace_find_next_entry_inc(&iter) != NULL) { | 
| Lai Jiangshan | 74e7ff8 | 2009-07-28 20:17:22 +0800 | [diff] [blame] | 5250 | int ret; | 
|  | 5251 |  | 
|  | 5252 | ret = print_trace_line(&iter); | 
|  | 5253 | if (ret != TRACE_TYPE_NO_CONSUME) | 
|  | 5254 | trace_consume(&iter); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5255 | } | 
| Steven Rostedt | b892e5c | 2012-03-01 22:06:48 -0500 | [diff] [blame] | 5256 | touch_nmi_watchdog(); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5257 |  | 
|  | 5258 | trace_printk_seq(&iter.seq); | 
|  | 5259 | } | 
|  | 5260 |  | 
|  | 5261 | if (!cnt) | 
|  | 5262 | printk(KERN_TRACE "   (ftrace buffer empty)\n"); | 
|  | 5263 | else | 
|  | 5264 | printk(KERN_TRACE "---------------------------------\n"); | 
|  | 5265 |  | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 5266 | out_enable: | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5267 | /* Re-enable tracing if requested */ | 
|  | 5268 | if (!disable_tracing) { | 
|  | 5269 | trace_flags |= old_userobj; | 
|  | 5270 |  | 
|  | 5271 | for_each_tracing_cpu(cpu) { | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 5272 | atomic_dec(&iter.tr->data[cpu]->disabled); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5273 | } | 
|  | 5274 | tracing_on(); | 
|  | 5275 | } | 
|  | 5276 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5277 | out: | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 5278 | arch_spin_unlock(&ftrace_dump_lock); | 
| Steven Rostedt | cd891ae | 2009-04-28 11:39:34 -0400 | [diff] [blame] | 5279 | local_irq_restore(flags); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5280 | } | 
|  | 5281 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5282 | /* By default: disable tracing after the dump */ | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 5283 | void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5284 | { | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 5285 | __ftrace_dump(true, oops_dump_mode); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5286 | } | 
| Paul E. McKenney | a8eecf2 | 2011-10-02 11:01:15 -0700 | [diff] [blame] | 5287 | EXPORT_SYMBOL_GPL(ftrace_dump); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 5288 |  | 
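/*
 * A hypothetical in-kernel sketch (not part of this file, added for
 * illustration): because ftrace_dump() is exported above, a module can
 * dump the trace buffers to the console when it hits a fatal condition.
 * The module itself and its trigger are made up for illustration.
 */
#include <linux/module.h>
#include <linux/ftrace.h>

static int __init dump_example_init(void)
{
	/* Dump only the buffer of the CPU that executes this path. */
	ftrace_dump(DUMP_ORIG);
	return 0;
}

static void __exit dump_example_exit(void)
{
}

module_init(dump_example_init);
module_exit(dump_example_exit);
MODULE_LICENSE("GPL");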
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5289 | __init static int tracer_alloc_buffers(void) | 
|  | 5290 | { | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 5291 | int ring_buf_size; | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 5292 | enum ring_buffer_flags rb_flags; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5293 | int i; | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 5294 | int ret = -ENOMEM; | 
|  | 5295 |  | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 5296 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 5297 | if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) | 
|  | 5298 | goto out; | 
|  | 5299 |  | 
|  | 5300 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 
|  | 5301 | goto out_free_buffer_mask; | 
|  | 5302 |  | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 5303 | /* Only allocate trace_printk buffers if a trace_printk exists */ | 
|  | 5304 | if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) | 
| Steven Rostedt | 8169883 | 2012-10-11 10:15:05 -0400 | [diff] [blame] | 5305 | /* Must be called before global_trace.buffer is allocated */ | 
| Steven Rostedt | 07d777f | 2011-09-22 14:01:55 -0400 | [diff] [blame] | 5306 | trace_printk_init_buffers(); | 
|  | 5307 |  | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 5308 | /* To save memory, keep the ring buffer size to its minimum */ | 
|  | 5309 | if (ring_buffer_expanded) | 
|  | 5310 | ring_buf_size = trace_buf_size; | 
|  | 5311 | else | 
|  | 5312 | ring_buf_size = 1; | 
|  | 5313 |  | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 5314 | rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; | 
|  | 5315 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 5316 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 
|  | 5317 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5318 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 5319 | /* TODO: make the number of buffers hot pluggable with CPUs */ | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 5320 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5321 | if (!global_trace.buffer) { | 
|  | 5322 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 
|  | 5323 | WARN_ON(1); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 5324 | goto out_free_cpumask; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5325 | } | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 5326 | if (global_trace.buffer_disabled) | 
|  | 5327 | tracing_off(); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5328 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 5329 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5330 | #ifdef CONFIG_TRACER_MAX_TRACE | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 5331 | max_tr.buffer = ring_buffer_alloc(1, rb_flags); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5332 | if (!max_tr.buffer) { | 
|  | 5333 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 
|  | 5334 | WARN_ON(1); | 
|  | 5335 | ring_buffer_free(global_trace.buffer); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 5336 | goto out_free_cpumask; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5337 | } | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5338 | #endif | 
|  | 5339 |  | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 5340 | /* Allocate the first page for all buffers */ | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 5341 | for_each_tracing_cpu(i) { | 
| jolsa@redhat.com | 566b0aa | 2009-07-16 21:44:26 +0200 | [diff] [blame] | 5342 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 
| Tejun Heo | 9705f69 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 5343 | max_tr.data[i] = &per_cpu(max_tr_data, i); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5344 | } | 
|  | 5345 |  | 
| Vaibhav Nagarnaik | a591c73 | 2012-05-03 10:40:34 -0700 | [diff] [blame] | 5346 | set_buffer_entries(&global_trace, | 
|  | 5347 | ring_buffer_size(global_trace.buffer, 0)); | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 5348 | #ifdef CONFIG_TRACER_MAX_TRACE | 
|  | 5349 | set_buffer_entries(&max_tr, 1); | 
|  | 5350 | #endif | 
|  | 5351 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5352 | trace_init_cmdlines(); | 
| Steven Rostedt | 0d5c6e1 | 2012-11-01 20:54:21 -0400 | [diff] [blame] | 5353 | init_irq_work(&trace_work_wakeup, trace_wake_up); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5354 |  | 
| Frédéric Weisbecker | 43a1538 | 2008-09-21 20:16:30 +0200 | [diff] [blame] | 5355 | register_tracer(&nop_trace); | 
| Steven Rostedt (Red Hat) | d840f71 | 2013-02-01 18:38:47 -0500 | [diff] [blame] | 5356 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 5357 | /* All seems OK, enable tracing */ | 
|  | 5358 | tracing_disabled = 0; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 5359 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5360 | atomic_notifier_chain_register(&panic_notifier_list, | 
|  | 5361 | &trace_panic_notifier); | 
|  | 5362 |  | 
|  | 5363 | register_die_notifier(&trace_die_notifier); | 
| Frederic Weisbecker | 2fc1dfb | 2009-03-16 01:45:03 +0100 | [diff] [blame] | 5364 |  | 
| Steven Rostedt | 7bcfaf5 | 2012-11-01 22:56:07 -0400 | [diff] [blame] | 5365 | while (trace_boot_options) { | 
|  | 5366 | char *option; | 
|  | 5367 |  | 
|  | 5368 | option = strsep(&trace_boot_options, ","); | 
|  | 5369 | trace_set_options(option); | 
|  | 5370 | } | 
|  | 5371 |  | 
| Frederic Weisbecker | 2fc1dfb | 2009-03-16 01:45:03 +0100 | [diff] [blame] | 5372 | return 0; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 5373 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 5374 | out_free_cpumask: | 
|  | 5375 | free_cpumask_var(tracing_cpumask); | 
|  | 5376 | out_free_buffer_mask: | 
|  | 5377 | free_cpumask_var(tracing_buffer_mask); | 
|  | 5378 | out: | 
|  | 5379 | return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 5380 | } | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 5381 |  | 
|  | 5382 | __init static int clear_boot_tracer(void) | 
|  | 5383 | { | 
|  | 5384 | /* | 
|  | 5385 | * The default bootup tracer name lives in an init section. | 
|  | 5386 | * This function is called at late_initcall time. If we did not | 
|  | 5387 | * find the boot tracer, clear the pointer out to prevent a | 
|  | 5388 | * later registration from accessing the buffer that is | 
|  | 5389 | * about to be freed. | 
|  | 5390 | */ | 
|  | 5391 | if (!default_bootup_tracer) | 
|  | 5392 | return 0; | 
|  | 5393 |  | 
|  | 5394 | printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | 
|  | 5395 | default_bootup_tracer); | 
|  | 5396 | default_bootup_tracer = NULL; | 
|  | 5397 |  | 
|  | 5398 | return 0; | 
|  | 5399 | } | 
|  | 5400 |  | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 5401 | early_initcall(tracer_alloc_buffers); | 
|  | 5402 | fs_initcall(tracer_init_debugfs); | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 5403 | late_initcall(clear_boot_tracer); |