/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as those from trace_printk, could occur at
 * the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and is set back to zero only if the
 * initialization of the tracer is successful; nothing else clears it.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

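/*
 * ftrace_disable_cpu()/ftrace_enable_cpu() bump a per-CPU counter that
 * the trace recording paths check before writing an event; while it is
 * non-zero, writes on this CPU are skipped.  They are used in pairs
 * around operations that must not race with new writes on this CPU
 * (e.g. resetting or swapping a per-CPU buffer), and preemption stays
 * disabled between them so the caller remains on the CPU whose counter
 * was incremented.
 */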
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	__this_cpu_inc(ftrace_cpu_disabled);
}

static inline void ftrace_enable_cpu(void)
{
	__this_cpu_dec(ftrace_cpu_disabled);
	preempt_enable();
}

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

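/*
 * Convert nanoseconds to microseconds, rounding to the nearest
 * microsecond by adding half a microsecond before the divide:
 * e.g. ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */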
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, it is much appreciated not to have to
 * wait for all that output. Anyway, this is both boot time and run
 * time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other
 * processes to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

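/*
 * Readers of the trace data are expected to bracket their buffer
 * accesses with the primitives above.  A sketch of the intended
 * pattern for a single-cpu reader (this is the usage convention,
 * not a new API):
 *
 *	trace_access_lock(cpu);
 *	... consume events from that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * A reader of all buffers passes TRACE_PIPE_ALL_CPU instead, which
 * takes all_cpu_access_lock for writing and excludes every per-cpu
 * reader at once.
 */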
/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;

static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);

static void wakeup_work_handler(struct work_struct *work)
{
	wake_up(&trace_wait);
}

static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * trace_wait queue. This is used with trace_poll for tasks polling the
 * trace.
 */
void trace_wake_up(void)
{
	const unsigned long delay = msecs_to_jiffies(2);

	if (trace_flags & TRACE_ITER_BLOCK)
		return;
	schedule_delayed_work(&wakeup_work, delay);
}

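/*
 * Parse the "trace_buf_size=" boot parameter.  The value goes through
 * memparse(), so the usual size suffixes are accepted as well, e.g.
 * "trace_buf_size=1M" should be equivalent to "trace_buf_size=1048576".
 */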
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },
	{ trace_clock_global,	"global" },
	{ trace_clock_counter,	"counter" },
};

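/*
 * trace_clock_id indexes trace_clocks[] above; it is the clock that is
 * switched when a clock name is written to the "trace_clock" file in
 * the tracing debugfs directory.
 */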
int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

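/*
 * A typical ->write() handler drives the parser roughly like this
 * (a sketch only; process_token() stands in for whatever the caller
 * does with each parsed word -- see ftrace_regex_write() for a real
 * user of the parser):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 *	trace_parser_put(&parser);
 *	return read;
 */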
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;
	void *ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = memcpy(buf, s->buffer + s->readpos, cnt);
	if (!ret)
		return -EFAULT;

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr,
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE block.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}
	arch_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;

		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded && type->use_max_tr)
			ring_buffer_resize(max_tr.buffer, trace_buf_size);

		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		/* Shrink the max buffer again */
		if (ring_buffer_expanded && type->use_max_tr)
			ring_buffer_resize(max_tr.buffer, 1);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

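/*
 * Registration is normally done from an __init function of the tracer
 * plugin.  A minimal sketch (the names below are purely illustrative,
 * not an existing tracer):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 */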
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Tracer %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;

	if (type == current_trace && tracer_enabled) {
		tracer_enabled = 0;
		tracing_stop();
		if (current_trace->stop)
			current_trace->stop(&global_trace);
		current_trace = &nop_trace;
	}
out:
	mutex_unlock(&trace_types_lock);
}

static void __tracing_reset(struct ring_buffer *buffer, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	__tracing_reset(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		__tracing_reset(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

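/*
 * The cmdline recording code keeps a small rolling cache of the comms
 * of tasks seen by the tracer: map_pid_to_cmdline maps a pid to a slot
 * in saved_cmdlines[], map_cmdline_to_pid maps the slot back to the
 * owning pid, and cmdline_idx is the next slot to recycle.
 * NO_CMDLINE_MAP marks unused entries in both maps.
 */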
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

 | 992 | /** | 
 | 993 |  * tracing_stop - quick stop of the tracer | 
 | 994 |  * | 
 | 995 |  * Lightweight way to stop tracing. Use in conjunction with | 
 | 996 |  * tracing_start. | 
 | 997 |  */ | 
 | 998 | void tracing_stop(void) | 
 | 999 | { | 
 | 1000 | 	struct ring_buffer *buffer; | 
 | 1001 | 	unsigned long flags; | 
 | 1002 |  | 
 | 1003 | 	ftrace_stop(); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1004 | 	raw_spin_lock_irqsave(&tracing_start_lock, flags); | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1005 | 	if (trace_stop_count++) | 
 | 1006 | 		goto out; | 
 | 1007 |  | 
| Steven Rostedt | a2f8071 | 2010-03-12 19:56:00 -0500 | [diff] [blame] | 1008 | 	/* Prevent the buffers from switching */ | 
 | 1009 | 	arch_spin_lock(&ftrace_max_lock); | 
 | 1010 |  | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1011 | 	buffer = global_trace.buffer; | 
 | 1012 | 	if (buffer) | 
 | 1013 | 		ring_buffer_record_disable(buffer); | 
 | 1014 |  | 
 | 1015 | 	buffer = max_tr.buffer; | 
 | 1016 | 	if (buffer) | 
 | 1017 | 		ring_buffer_record_disable(buffer); | 
 | 1018 |  | 
| Steven Rostedt | a2f8071 | 2010-03-12 19:56:00 -0500 | [diff] [blame] | 1019 | 	arch_spin_unlock(&ftrace_max_lock); | 
 | 1020 |  | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1021 |  out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1022 | 	raw_spin_unlock_irqrestore(&tracing_start_lock, flags); | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1023 | } | 
 | 1024 |  | 
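/*
 * A minimal sketch of the intended pairing (illustrative only): stop
 * the writers around a section whose events are not interesting, then
 * start them again.  Both my_quiet_section() and do_noisy_work() are
 * hypothetical names.
 *
 *	static void my_quiet_section(void)
 *	{
 *		tracing_stop();
 *		do_noisy_work();
 *		tracing_start();
 *	}
 *
 * The calls nest via trace_stop_count: recording is only re-enabled
 * when the outermost tracing_start() brings the count back to zero.
 */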
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1025 | void trace_stop_cmdline_recording(void); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1026 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1027 | static void trace_save_cmdline(struct task_struct *tsk) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1028 | { | 
| Carsten Emde | a635cf0 | 2009-03-18 09:00:41 +0100 | [diff] [blame] | 1029 | 	unsigned pid, idx; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1030 |  | 
 | 1031 | 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) | 
 | 1032 | 		return; | 
 | 1033 |  | 
 | 1034 | 	/* | 
 | 1035 | 	 * It's not the end of the world if we don't get | 
 | 1036 | 	 * the lock, but we also don't want to spin, | 
 | 1037 | 	 * nor do we want to disable interrupts, | 
 | 1038 | 	 * so if we miss here, better luck next time. | 
 | 1039 | 	 */ | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1040 | 	if (!arch_spin_trylock(&trace_cmdline_lock)) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1041 | 		return; | 
 | 1042 |  | 
 | 1043 | 	idx = map_pid_to_cmdline[tsk->pid]; | 
| Thomas Gleixner | 2c7eea4 | 2009-03-18 09:03:19 +0100 | [diff] [blame] | 1044 | 	if (idx == NO_CMDLINE_MAP) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1045 | 		idx = (cmdline_idx + 1) % SAVED_CMDLINES; | 
 | 1046 |  | 
| Carsten Emde | a635cf0 | 2009-03-18 09:00:41 +0100 | [diff] [blame] | 1047 | 		/* | 
 | 1048 | 		 * Check whether the cmdline buffer at idx has a pid | 
 | 1049 | 		 * mapped. We are going to overwrite that entry so we | 
 | 1050 | 		 * need to clear the map_pid_to_cmdline. Otherwise we | 
 | 1051 | 		 * would read the new comm for the old pid. | 
 | 1052 | 		 */ | 
 | 1053 | 		pid = map_cmdline_to_pid[idx]; | 
 | 1054 | 		if (pid != NO_CMDLINE_MAP) | 
 | 1055 | 			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1056 |  | 
| Carsten Emde | a635cf0 | 2009-03-18 09:00:41 +0100 | [diff] [blame] | 1057 | 		map_cmdline_to_pid[idx] = tsk->pid; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1058 | 		map_pid_to_cmdline[tsk->pid] = idx; | 
 | 1059 |  | 
 | 1060 | 		cmdline_idx = idx; | 
 | 1061 | 	} | 
 | 1062 |  | 
 | 1063 | 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); | 
 | 1064 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1065 | 	arch_spin_unlock(&trace_cmdline_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1066 | } | 
 | 1067 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 1068 | void trace_find_cmdline(int pid, char comm[]) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1069 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1070 | 	unsigned map; | 
 | 1071 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 1072 | 	if (!pid) { | 
 | 1073 | 		strcpy(comm, "<idle>"); | 
 | 1074 | 		return; | 
 | 1075 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1076 |  | 
| Steven Rostedt | 74bf407 | 2010-01-25 15:11:53 -0500 | [diff] [blame] | 1077 | 	if (WARN_ON_ONCE(pid < 0)) { | 
 | 1078 | 		strcpy(comm, "<XXX>"); | 
 | 1079 | 		return; | 
 | 1080 | 	} | 
 | 1081 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 1082 | 	if (pid > PID_MAX_DEFAULT) { | 
 | 1083 | 		strcpy(comm, "<...>"); | 
 | 1084 | 		return; | 
 | 1085 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1086 |  | 
| Heiko Carstens | 5b6045a | 2009-05-26 17:28:02 +0200 | [diff] [blame] | 1087 | 	preempt_disable(); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1088 | 	arch_spin_lock(&trace_cmdline_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1089 | 	map = map_pid_to_cmdline[pid]; | 
| Thomas Gleixner | 50d8875 | 2009-03-18 08:58:44 +0100 | [diff] [blame] | 1090 | 	if (map != NO_CMDLINE_MAP) | 
 | 1091 | 		strcpy(comm, saved_cmdlines[map]); | 
 | 1092 | 	else | 
 | 1093 | 		strcpy(comm, "<...>"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1094 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1095 | 	arch_spin_unlock(&trace_cmdline_lock); | 
| Heiko Carstens | 5b6045a | 2009-05-26 17:28:02 +0200 | [diff] [blame] | 1096 | 	preempt_enable(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1097 | } | 
 | 1098 |  | 
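/*
 * A small sketch of how a consumer resolves a pid to a comm
 * (illustrative only; my_print_task() is a hypothetical name):
 *
 *	static void my_print_task(int pid)
 *	{
 *		char comm[TASK_COMM_LEN];
 *
 *		trace_find_cmdline(pid, comm);
 *		pr_info("pid %d last ran as %s\n", pid, comm);
 *	}
 *
 * A "<...>" result simply means the pid fell out of the small
 * SAVED_CMDLINES cache or was never recorded.
 */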
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1099 | void tracing_record_cmdline(struct task_struct *tsk) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1100 | { | 
| Thomas Gleixner | 18aecd3 | 2009-03-18 08:56:58 +0100 | [diff] [blame] | 1101 | 	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled || | 
 | 1102 | 	    !tracing_is_on()) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1103 | 		return; | 
 | 1104 |  | 
 | 1105 | 	trace_save_cmdline(tsk); | 
 | 1106 | } | 
 | 1107 |  | 
| Pekka Paalanen | 45dcd8b | 2008-09-16 21:56:41 +0300 | [diff] [blame] | 1108 | void | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1109 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | 
 | 1110 | 			     int pc) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1111 | { | 
 | 1112 | 	struct task_struct *tsk = current; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1113 |  | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1114 | 	entry->preempt_count		= pc & 0xff; | 
 | 1115 | 	entry->pid			= (tsk) ? tsk->pid : 0; | 
| Arjan van de Ven | a3a4a5a | 2011-05-05 23:55:18 -0400 | [diff] [blame] | 1116 | 	entry->padding			= 0; | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1117 | 	entry->flags = | 
| Steven Rostedt | 9244489 | 2008-10-24 09:42:59 -0400 | [diff] [blame] | 1118 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 
| Steven Rostedt | 2e2ca15 | 2008-08-01 12:26:40 -0400 | [diff] [blame] | 1119 | 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 
| Steven Rostedt | 9244489 | 2008-10-24 09:42:59 -0400 | [diff] [blame] | 1120 | #else | 
 | 1121 | 		TRACE_FLAG_IRQS_NOSUPPORT | | 
 | 1122 | #endif | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1123 | 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | | 
 | 1124 | 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 
 | 1125 | 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 
 | 1126 | } | 
| Frederic Weisbecker | f413cdb | 2009-08-07 01:25:54 +0200 | [diff] [blame] | 1127 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1128 |  | 
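/*
 * Callers typically capture the irq flags and preempt count themselves
 * and hand them in, roughly like the (hypothetical) helper below:
 *
 *	static void my_fill_header(struct trace_entry *ent)
 *	{
 *		unsigned long flags;
 *
 *		local_save_flags(flags);
 *		tracing_generic_entry_update(ent, flags, preempt_count());
 *	}
 *
 * In this file that pattern is wrapped up by trace_buffer_lock_reserve()
 * below, which fills the header of every event it reserves.
 */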
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1129 | struct ring_buffer_event * | 
 | 1130 | trace_buffer_lock_reserve(struct ring_buffer *buffer, | 
 | 1131 | 			  int type, | 
 | 1132 | 			  unsigned long len, | 
 | 1133 | 			  unsigned long flags, int pc) | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1134 | { | 
 | 1135 | 	struct ring_buffer_event *event; | 
 | 1136 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1137 | 	event = ring_buffer_lock_reserve(buffer, len); | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1138 | 	if (event != NULL) { | 
 | 1139 | 		struct trace_entry *ent = ring_buffer_event_data(event); | 
 | 1140 |  | 
 | 1141 | 		tracing_generic_entry_update(ent, flags, pc); | 
 | 1142 | 		ent->type = type; | 
 | 1143 | 	} | 
 | 1144 |  | 
 | 1145 | 	return event; | 
 | 1146 | } | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1147 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1148 | static inline void | 
 | 1149 | __trace_buffer_unlock_commit(struct ring_buffer *buffer, | 
 | 1150 | 			     struct ring_buffer_event *event, | 
 | 1151 | 			     unsigned long flags, int pc, | 
 | 1152 | 			     int wake) | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1153 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1154 | 	ring_buffer_unlock_commit(buffer, event); | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1155 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1156 | 	ftrace_trace_stack(buffer, flags, 6, pc); | 
 | 1157 | 	ftrace_trace_userstack(buffer, flags, pc); | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1158 |  | 
 | 1159 | 	if (wake) | 
 | 1160 | 		trace_wake_up(); | 
 | 1161 | } | 
 | 1162 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1163 | void trace_buffer_unlock_commit(struct ring_buffer *buffer, | 
 | 1164 | 				struct ring_buffer_event *event, | 
 | 1165 | 				unsigned long flags, int pc) | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1166 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1167 | 	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1); | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1168 | } | 
 | 1169 |  | 
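/*
 * The reserve/fill/commit pattern these helpers implement looks roughly
 * like this sketch (my_write_fn_event() is a hypothetical name; the real
 * users are the tracers later in this file):
 *
 *	static void my_write_fn_event(struct trace_array *tr,
 *				      unsigned long ip, unsigned long pip,
 *				      unsigned long flags, int pc)
 *	{
 *		struct ring_buffer *buffer = tr->buffer;
 *		struct ring_buffer_event *event;
 *		struct ftrace_entry *entry;
 *
 *		event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *						  sizeof(*entry), flags, pc);
 *		if (!event)
 *			return;
 *		entry = ring_buffer_event_data(event);
 *		entry->ip = ip;
 *		entry->parent_ip = pip;
 *		trace_buffer_unlock_commit(buffer, event, flags, pc);
 *	}
 *
 * A failed reserve (NULL) just means the event is dropped; there is
 * nothing to undo.
 */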
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1170 | struct ring_buffer_event * | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1171 | trace_current_buffer_lock_reserve(struct ring_buffer **current_rb, | 
 | 1172 | 				  int type, unsigned long len, | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1173 | 				  unsigned long flags, int pc) | 
 | 1174 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1175 | 	*current_rb = global_trace.buffer; | 
 | 1176 | 	return trace_buffer_lock_reserve(*current_rb, | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1177 | 					 type, len, flags, pc); | 
 | 1178 | } | 
| Steven Rostedt | 94487d6 | 2009-05-05 19:22:53 -0400 | [diff] [blame] | 1179 | EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1180 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1181 | void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, | 
 | 1182 | 					struct ring_buffer_event *event, | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1183 | 					unsigned long flags, int pc) | 
 | 1184 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1185 | 	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1); | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1186 | } | 
| Steven Rostedt | 94487d6 | 2009-05-05 19:22:53 -0400 | [diff] [blame] | 1187 | EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit); | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1188 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1189 | void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, | 
 | 1190 | 				       struct ring_buffer_event *event, | 
 | 1191 | 				       unsigned long flags, int pc) | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 1192 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1193 | 	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1194 | } | 
| Steven Rostedt | 94487d6 | 2009-05-05 19:22:53 -0400 | [diff] [blame] | 1195 | EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit); | 
| Steven Rostedt | 77d9f46 | 2009-04-02 01:16:59 -0400 | [diff] [blame] | 1196 |  | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1197 | void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer, | 
 | 1198 | 					    struct ring_buffer_event *event, | 
 | 1199 | 					    unsigned long flags, int pc, | 
 | 1200 | 					    struct pt_regs *regs) | 
 | 1201 | { | 
 | 1202 | 	ring_buffer_unlock_commit(buffer, event); | 
 | 1203 |  | 
 | 1204 | 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs); | 
 | 1205 | 	ftrace_trace_userstack(buffer, flags, pc); | 
 | 1206 | } | 
 | 1207 | EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs); | 
 | 1208 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1209 | void trace_current_buffer_discard_commit(struct ring_buffer *buffer, | 
 | 1210 | 					 struct ring_buffer_event *event) | 
| Steven Rostedt | 77d9f46 | 2009-04-02 01:16:59 -0400 | [diff] [blame] | 1211 | { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1212 | 	ring_buffer_discard_commit(buffer, event); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1213 | } | 
| Steven Rostedt | 12acd47 | 2009-04-17 16:01:56 -0400 | [diff] [blame] | 1214 | EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 1215 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1216 | void | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1217 | trace_function(struct trace_array *tr, | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1218 | 	       unsigned long ip, unsigned long parent_ip, unsigned long flags, | 
 | 1219 | 	       int pc) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1220 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1221 | 	struct ftrace_event_call *call = &event_function; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1222 | 	struct ring_buffer *buffer = tr->buffer; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1223 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1224 | 	struct ftrace_entry *entry; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1225 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1226 | 	/* If we are reading the ring buffer, don't trace */ | 
| Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1227 | 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1228 | 		return; | 
 | 1229 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1230 | 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1231 | 					  flags, pc); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1232 | 	if (!event) | 
 | 1233 | 		return; | 
 | 1234 | 	entry	= ring_buffer_event_data(event); | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1235 | 	entry->ip			= ip; | 
 | 1236 | 	entry->parent_ip		= parent_ip; | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1237 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1238 | 	if (!filter_check_discard(call, entry, buffer, event)) | 
 | 1239 | 		ring_buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1240 | } | 
 | 1241 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1242 | void | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1243 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1244 |        unsigned long ip, unsigned long parent_ip, unsigned long flags, | 
 | 1245 |        int pc) | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1246 | { | 
 | 1247 | 	if (likely(!atomic_read(&data->disabled))) | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1248 | 		trace_function(tr, ip, parent_ip, flags, pc); | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1249 | } | 
 | 1250 |  | 
| Frederic Weisbecker | c0a0d0d | 2009-07-29 17:51:13 +0200 | [diff] [blame] | 1251 | #ifdef CONFIG_STACKTRACE | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1252 |  | 
 | 1253 | #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) | 
 | 1254 | struct ftrace_stack { | 
 | 1255 | 	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES]; | 
 | 1256 | }; | 
 | 1257 |  | 
 | 1258 | static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); | 
 | 1259 | static DEFINE_PER_CPU(int, ftrace_stack_reserve); | 
 | 1260 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1261 | static void __ftrace_trace_stack(struct ring_buffer *buffer, | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1262 | 				 unsigned long flags, | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1263 | 				 int skip, int pc, struct pt_regs *regs) | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1264 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1265 | 	struct ftrace_event_call *call = &event_kernel_stack; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1266 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1267 | 	struct stack_entry *entry; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1268 | 	struct stack_trace trace; | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1269 | 	int use_stack; | 
 | 1270 | 	int size = FTRACE_STACK_ENTRIES; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1271 |  | 
 | 1272 | 	trace.nr_entries	= 0; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1273 | 	trace.skip		= skip; | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1274 |  | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1275 | 	/* | 
 | 1276 | 	 * Since events can happen in NMIs there's no safe way to | 
 | 1277 | 	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt | 
 | 1278 | 	 * or NMI comes in, it will just have to use the default | 
 | 1279 | 	 * FTRACE_STACK_SIZE. | 
 | 1280 | 	 */ | 
 | 1281 | 	preempt_disable_notrace(); | 
 | 1282 |  | 
 | 1283 | 	use_stack = ++__get_cpu_var(ftrace_stack_reserve); | 
 | 1284 | 	/* | 
 | 1285 | 	 * We don't need any atomic variables, just a barrier. | 
 | 1286 | 	 * If an interrupt comes in, we don't care, because it would | 
 | 1287 | 	 * have exited and put the counter back to what we want. | 
 | 1288 | 	 * We just need a barrier to keep gcc from moving things | 
 | 1289 | 	 * around. | 
 | 1290 | 	 */ | 
 | 1291 | 	barrier(); | 
 | 1292 | 	if (use_stack == 1) { | 
 | 1293 | 		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0]; | 
 | 1294 | 		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES; | 
 | 1295 |  | 
 | 1296 | 		if (regs) | 
 | 1297 | 			save_stack_trace_regs(regs, &trace); | 
 | 1298 | 		else | 
 | 1299 | 			save_stack_trace(&trace); | 
 | 1300 |  | 
 | 1301 | 		if (trace.nr_entries > size) | 
 | 1302 | 			size = trace.nr_entries; | 
 | 1303 | 	} else | 
 | 1304 | 		/* From now on, use_stack is a boolean */ | 
 | 1305 | 		use_stack = 0; | 
 | 1306 |  | 
 | 1307 | 	size *= sizeof(unsigned long); | 
 | 1308 |  | 
 | 1309 | 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK, | 
 | 1310 | 					  sizeof(*entry) + size, flags, pc); | 
 | 1311 | 	if (!event) | 
 | 1312 | 		goto out; | 
 | 1313 | 	entry = ring_buffer_event_data(event); | 
 | 1314 |  | 
 | 1315 | 	memset(&entry->caller, 0, size); | 
 | 1316 |  | 
 | 1317 | 	if (use_stack) | 
 | 1318 | 		memcpy(&entry->caller, trace.entries, | 
 | 1319 | 		       trace.nr_entries * sizeof(unsigned long)); | 
 | 1320 | 	else { | 
 | 1321 | 		trace.max_entries	= FTRACE_STACK_ENTRIES; | 
 | 1322 | 		trace.entries		= entry->caller; | 
 | 1323 | 		if (regs) | 
 | 1324 | 			save_stack_trace_regs(regs, &trace); | 
 | 1325 | 		else | 
 | 1326 | 			save_stack_trace(&trace); | 
 | 1327 | 	} | 
 | 1328 |  | 
 | 1329 | 	entry->size = trace.nr_entries; | 
 | 1330 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1331 | 	if (!filter_check_discard(call, entry, buffer, event)) | 
 | 1332 | 		ring_buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1333 |  | 
 | 1334 |  out: | 
 | 1335 | 	/* Again, don't let gcc optimize things here */ | 
 | 1336 | 	barrier(); | 
 | 1337 | 	__get_cpu_var(ftrace_stack_reserve)--; | 
 | 1338 | 	preempt_enable_notrace(); | 
 | 1339 |  | 
| Ingo Molnar | f0a920d | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1340 | } | 
 | 1341 |  | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1342 | void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, | 
 | 1343 | 			     int skip, int pc, struct pt_regs *regs) | 
 | 1344 | { | 
 | 1345 | 	if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 
 | 1346 | 		return; | 
 | 1347 |  | 
 | 1348 | 	__ftrace_trace_stack(buffer, flags, skip, pc, regs); | 
 | 1349 | } | 
 | 1350 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1351 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, | 
 | 1352 | 			int skip, int pc) | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1353 | { | 
 | 1354 | 	if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 
 | 1355 | 		return; | 
 | 1356 |  | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1357 | 	__ftrace_trace_stack(buffer, flags, skip, pc, NULL); | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1358 | } | 
 | 1359 |  | 
| Frederic Weisbecker | c0a0d0d | 2009-07-29 17:51:13 +0200 | [diff] [blame] | 1360 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | 
 | 1361 | 		   int pc) | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1362 | { | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1363 | 	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL); | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1364 | } | 
 | 1365 |  | 
| Steven Rostedt | 0388938 | 2009-12-11 09:48:22 -0500 | [diff] [blame] | 1366 | /** | 
 | 1367 |  * trace_dump_stack - record a stack backtrace in the trace buffer | 
 | 1368 |  */ | 
 | 1369 | void trace_dump_stack(void) | 
 | 1370 | { | 
 | 1371 | 	unsigned long flags; | 
 | 1372 |  | 
 | 1373 | 	if (tracing_disabled || tracing_selftest_running) | 
| Steven Rostedt | e36c545 | 2009-12-14 15:58:33 -0500 | [diff] [blame] | 1374 | 		return; | 
| Steven Rostedt | 0388938 | 2009-12-11 09:48:22 -0500 | [diff] [blame] | 1375 |  | 
 | 1376 | 	local_save_flags(flags); | 
 | 1377 |  | 
 | 1378 | 	/* Skipping 3 frames seems to get us to the caller of this function */ | 
| Masami Hiramatsu | 1fd8df2 | 2011-06-08 16:09:34 +0900 | [diff] [blame] | 1379 | 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL); | 
| Steven Rostedt | 0388938 | 2009-12-11 09:48:22 -0500 | [diff] [blame] | 1380 | } | 
 | 1381 |  | 
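/*
 * Typical (illustrative) use: drop a call into a code path being
 * debugged and the kernel stack at that point shows up as a stack
 * event in the trace output:
 *
 *	if (suspicious_condition)
 *		trace_dump_stack();
 *
 * suspicious_condition is of course a placeholder for whatever check
 * the debugging session needs.
 */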
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1382 | static DEFINE_PER_CPU(int, user_stack_count); | 
 | 1383 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1384 | void | 
 | 1385 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1386 | { | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1387 | 	struct ftrace_event_call *call = &event_user_stack; | 
| Török Edwin | 8d7c6a9 | 2008-11-23 12:39:06 +0200 | [diff] [blame] | 1388 | 	struct ring_buffer_event *event; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1389 | 	struct userstack_entry *entry; | 
 | 1390 | 	struct stack_trace trace; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1391 |  | 
 | 1392 | 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 
 | 1393 | 		return; | 
 | 1394 |  | 
| Steven Rostedt | b634587 | 2010-03-12 20:03:30 -0500 | [diff] [blame] | 1395 | 	/* | 
 | 1396 | 	 * NMIs cannot handle page faults, even with fixups. | 
 | 1397 | 	 * Saving the user stack can (and often does) fault. | 
 | 1398 | 	 */ | 
 | 1399 | 	if (unlikely(in_nmi())) | 
 | 1400 | 		return; | 
 | 1401 |  | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1402 | 	/* | 
 | 1403 | 	 * prevent recursion, since the user stack tracing may | 
 | 1404 | 	 * trigger other kernel events. | 
 | 1405 | 	 */ | 
 | 1406 | 	preempt_disable(); | 
 | 1407 | 	if (__this_cpu_read(user_stack_count)) | 
 | 1408 | 		goto out; | 
 | 1409 |  | 
 | 1410 | 	__this_cpu_inc(user_stack_count); | 
 | 1411 |  | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1412 | 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1413 | 					  sizeof(*entry), flags, pc); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1414 | 	if (!event) | 
| Li Zefan | 1dbd195 | 2010-12-09 15:47:56 +0800 | [diff] [blame] | 1415 | 		goto out_drop_count; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1416 | 	entry	= ring_buffer_event_data(event); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1417 |  | 
| Steven Rostedt | 48659d3 | 2009-09-11 11:36:23 -0400 | [diff] [blame] | 1418 | 	entry->tgid		= current->tgid; | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1419 | 	memset(&entry->caller, 0, sizeof(entry->caller)); | 
 | 1420 |  | 
 | 1421 | 	trace.nr_entries	= 0; | 
 | 1422 | 	trace.max_entries	= FTRACE_STACK_ENTRIES; | 
 | 1423 | 	trace.skip		= 0; | 
 | 1424 | 	trace.entries		= entry->caller; | 
 | 1425 |  | 
 | 1426 | 	save_stack_trace_user(&trace); | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1427 | 	if (!filter_check_discard(call, entry, buffer, event)) | 
 | 1428 | 		ring_buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1429 |  | 
| Li Zefan | 1dbd195 | 2010-12-09 15:47:56 +0800 | [diff] [blame] | 1430 |  out_drop_count: | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1431 | 	__this_cpu_dec(user_stack_count); | 
| Steven Rostedt | 91e86e5 | 2010-11-10 12:56:12 +0100 | [diff] [blame] | 1432 |  out: | 
 | 1433 | 	preempt_enable(); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1434 | } | 
 | 1435 |  | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 1436 | #ifdef UNUSED | 
 | 1437 | static void __trace_userstack(struct trace_array *tr, unsigned long flags) | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1438 | { | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1439 | 	ftrace_trace_userstack(tr->buffer, flags, preempt_count()); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1440 | } | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 1441 | #endif /* UNUSED */ | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1442 |  | 
| Frederic Weisbecker | c0a0d0d | 2009-07-29 17:51:13 +0200 | [diff] [blame] | 1443 | #endif /* CONFIG_STACKTRACE */ | 
 | 1444 |  | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1445 | /** | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1446 |  * trace_vbprintk - write a binary message to the tracing buffer | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1447 |  * | 
 | 1448 |  */ | 
| Steven Rostedt | 40ce74f | 2009-03-19 14:03:53 -0400 | [diff] [blame] | 1449 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1450 | { | 
| Thomas Gleixner | 445c895 | 2009-12-02 19:49:50 +0100 | [diff] [blame] | 1451 | 	static arch_spinlock_t trace_buf_lock = | 
| Thomas Gleixner | edc35bd | 2009-12-03 12:38:57 +0100 | [diff] [blame] | 1452 | 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1453 | 	static u32 trace_buf[TRACE_BUF_SIZE]; | 
 | 1454 |  | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1455 | 	struct ftrace_event_call *call = &event_bprint; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1456 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1457 | 	struct ring_buffer *buffer; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1458 | 	struct trace_array *tr = &global_trace; | 
 | 1459 | 	struct trace_array_cpu *data; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1460 | 	struct bprint_entry *entry; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1461 | 	unsigned long flags; | 
| Steven Rostedt | 3189cdb | 2009-04-17 16:13:55 -0400 | [diff] [blame] | 1462 | 	int disable; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1463 | 	int cpu, len = 0, size, pc; | 
 | 1464 |  | 
 | 1465 | 	if (unlikely(tracing_selftest_running || tracing_disabled)) | 
 | 1466 | 		return 0; | 
 | 1467 |  | 
 | 1468 | 	/* Don't pollute graph traces with trace_vprintk internals */ | 
 | 1469 | 	pause_graph_tracing(); | 
 | 1470 |  | 
 | 1471 | 	pc = preempt_count(); | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 1472 | 	preempt_disable_notrace(); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1473 | 	cpu = raw_smp_processor_id(); | 
 | 1474 | 	data = tr->data[cpu]; | 
 | 1475 |  | 
| Steven Rostedt | 3189cdb | 2009-04-17 16:13:55 -0400 | [diff] [blame] | 1476 | 	disable = atomic_inc_return(&data->disabled); | 
 | 1477 | 	if (unlikely(disable != 1)) | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1478 | 		goto out; | 
 | 1479 |  | 
| Steven Rostedt | 80370cb | 2009-03-10 17:16:35 -0400 | [diff] [blame] | 1480 | 	/* Lockdep uses trace_printk for lock tracing */ | 
 | 1481 | 	local_irq_save(flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1482 | 	arch_spin_lock(&trace_buf_lock); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1483 | 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 
 | 1484 |  | 
 | 1485 | 	if (len > TRACE_BUF_SIZE || len < 0) | 
 | 1486 | 		goto out_unlock; | 
 | 1487 |  | 
 | 1488 | 	size = sizeof(*entry) + sizeof(u32) * len; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1489 | 	buffer = tr->buffer; | 
 | 1490 | 	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, | 
 | 1491 | 					  flags, pc); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1492 | 	if (!event) | 
 | 1493 | 		goto out_unlock; | 
 | 1494 | 	entry = ring_buffer_event_data(event); | 
 | 1495 | 	entry->ip			= ip; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1496 | 	entry->fmt			= fmt; | 
 | 1497 |  | 
 | 1498 | 	memcpy(entry->buf, trace_buf, sizeof(u32) * len); | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1499 | 	if (!filter_check_discard(call, entry, buffer, event)) { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1500 | 		ring_buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1501 | 		ftrace_trace_stack(buffer, flags, 6, pc); | 
 | 1502 | 	} | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1503 |  | 
 | 1504 | out_unlock: | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1505 | 	arch_spin_unlock(&trace_buf_lock); | 
| Steven Rostedt | 80370cb | 2009-03-10 17:16:35 -0400 | [diff] [blame] | 1506 | 	local_irq_restore(flags); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1507 |  | 
 | 1508 | out: | 
| Steven Rostedt | 3189cdb | 2009-04-17 16:13:55 -0400 | [diff] [blame] | 1509 | 	atomic_dec_return(&data->disabled); | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 1510 | 	preempt_enable_notrace(); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1511 | 	unpause_graph_tracing(); | 
 | 1512 |  | 
 | 1513 | 	return len; | 
 | 1514 | } | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1515 | EXPORT_SYMBOL_GPL(trace_vbprintk); | 
 | 1516 |  | 
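/*
 * trace_vbprintk() is the va_list backend; a varargs front end is a
 * thin wrapper around it, roughly like this sketch (my_trace_bprintk()
 * is a hypothetical name; the real front end lives elsewhere in the
 * tracing code):
 *
 *	static int my_trace_bprintk(unsigned long ip, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vbprintk(ip, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 */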
| Steven Rostedt | 659372d | 2009-09-03 19:11:07 -0400 | [diff] [blame] | 1517 | int trace_array_printk(struct trace_array *tr, | 
 | 1518 | 		       unsigned long ip, const char *fmt, ...) | 
 | 1519 | { | 
 | 1520 | 	int ret; | 
 | 1521 | 	va_list ap; | 
 | 1522 |  | 
 | 1523 | 	if (!(trace_flags & TRACE_ITER_PRINTK)) | 
 | 1524 | 		return 0; | 
 | 1525 |  | 
 | 1526 | 	va_start(ap, fmt); | 
 | 1527 | 	ret = trace_array_vprintk(tr, ip, fmt, ap); | 
 | 1528 | 	va_end(ap); | 
 | 1529 | 	return ret; | 
 | 1530 | } | 
 | 1531 |  | 
 | 1532 | int trace_array_vprintk(struct trace_array *tr, | 
 | 1533 | 			unsigned long ip, const char *fmt, va_list args) | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1534 | { | 
| Thomas Gleixner | edc35bd | 2009-12-03 12:38:57 +0100 | [diff] [blame] | 1535 | 	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1536 | 	static char trace_buf[TRACE_BUF_SIZE]; | 
 | 1537 |  | 
| Tom Zanussi | e1112b4 | 2009-03-31 00:48:49 -0500 | [diff] [blame] | 1538 | 	struct ftrace_event_call *call = &event_print; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1539 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1540 | 	struct ring_buffer *buffer; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1541 | 	struct trace_array_cpu *data; | 
 | 1542 | 	int cpu, len = 0, size, pc; | 
 | 1543 | 	struct print_entry *entry; | 
 | 1544 | 	unsigned long irq_flags; | 
| Steven Rostedt | 3189cdb | 2009-04-17 16:13:55 -0400 | [diff] [blame] | 1545 | 	int disable; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1546 |  | 
 | 1547 | 	if (tracing_disabled || tracing_selftest_running) | 
 | 1548 | 		return 0; | 
 | 1549 |  | 
 | 1550 | 	pc = preempt_count(); | 
 | 1551 | 	preempt_disable_notrace(); | 
 | 1552 | 	cpu = raw_smp_processor_id(); | 
 | 1553 | 	data = tr->data[cpu]; | 
 | 1554 |  | 
| Steven Rostedt | 3189cdb | 2009-04-17 16:13:55 -0400 | [diff] [blame] | 1555 | 	disable = atomic_inc_return(&data->disabled); | 
 | 1556 | 	if (unlikely(disable != 1)) | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1557 | 		goto out; | 
 | 1558 |  | 
 | 1559 | 	pause_graph_tracing(); | 
 | 1560 | 	raw_local_irq_save(irq_flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1561 | 	arch_spin_lock(&trace_buf_lock); | 
| Carsten Emde | f294248 | 2009-12-06 14:02:44 +0100 | [diff] [blame] | 1562 | 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1563 |  | 
 | 1564 | 	size = sizeof(*entry) + len + 1; | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1565 | 	buffer = tr->buffer; | 
 | 1566 | 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 
 | 1567 | 					  irq_flags, pc); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1568 | 	if (!event) | 
 | 1569 | 		goto out_unlock; | 
 | 1570 | 	entry = ring_buffer_event_data(event); | 
| Carsten Emde | c13d2f7 | 2009-11-16 20:56:13 +0100 | [diff] [blame] | 1571 | 	entry->ip = ip; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1572 |  | 
 | 1573 | 	memcpy(&entry->buf, trace_buf, len); | 
| Carsten Emde | c13d2f7 | 2009-11-16 20:56:13 +0100 | [diff] [blame] | 1574 | 	entry->buf[len] = '\0'; | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1575 | 	if (!filter_check_discard(call, entry, buffer, event)) { | 
| Steven Rostedt | e77405a | 2009-09-02 14:17:06 -0400 | [diff] [blame] | 1576 | 		ring_buffer_unlock_commit(buffer, event); | 
| Steven Rostedt | d931369 | 2010-01-06 17:27:11 -0500 | [diff] [blame] | 1577 | 		ftrace_trace_stack(buffer, irq_flags, 6, pc); | 
 | 1578 | 	} | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1579 |  | 
 | 1580 |  out_unlock: | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 1581 | 	arch_spin_unlock(&trace_buf_lock); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1582 | 	raw_local_irq_restore(irq_flags); | 
 | 1583 | 	unpause_graph_tracing(); | 
 | 1584 |  out: | 
| Steven Rostedt | 3189cdb | 2009-04-17 16:13:55 -0400 | [diff] [blame] | 1585 | 	atomic_dec_return(&data->disabled); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1586 | 	preempt_enable_notrace(); | 
 | 1587 |  | 
 | 1588 | 	return len; | 
 | 1589 | } | 
| Steven Rostedt | 659372d | 2009-09-03 19:11:07 -0400 | [diff] [blame] | 1590 |  | 
 | 1591 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 
 | 1592 | { | 
| Steven Rostedt | a813a15 | 2009-10-09 01:41:35 -0400 | [diff] [blame] | 1593 | 	return trace_array_vprintk(&global_trace, ip, fmt, args); | 
| Steven Rostedt | 659372d | 2009-09-03 19:11:07 -0400 | [diff] [blame] | 1594 | } | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1595 | EXPORT_SYMBOL_GPL(trace_vprintk); | 
 | 1596 |  | 
| Robert Richter | e2ac8ef | 2008-11-12 12:59:32 +0100 | [diff] [blame] | 1597 | static void trace_iterator_increment(struct trace_iterator *iter) | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1598 | { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1599 | 	/* Don't allow ftrace to trace into the ring buffers */ | 
 | 1600 | 	ftrace_disable_cpu(); | 
 | 1601 |  | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1602 | 	iter->idx++; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1603 | 	if (iter->buffer_iter[iter->cpu]) | 
 | 1604 | 		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | 
 | 1605 |  | 
 | 1606 | 	ftrace_enable_cpu(); | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1607 | } | 
 | 1608 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1609 | static struct trace_entry * | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1610 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, | 
 | 1611 | 		unsigned long *lost_events) | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1612 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1613 | 	struct ring_buffer_event *event; | 
 | 1614 | 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1615 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1616 | 	/* Don't allow ftrace to trace into the ring buffers */ | 
 | 1617 | 	ftrace_disable_cpu(); | 
 | 1618 |  | 
 | 1619 | 	if (buf_iter) | 
 | 1620 | 		event = ring_buffer_iter_peek(buf_iter, ts); | 
 | 1621 | 	else | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1622 | 		event = ring_buffer_peek(iter->tr->buffer, cpu, ts, | 
 | 1623 | 					 lost_events); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1624 |  | 
 | 1625 | 	ftrace_enable_cpu(); | 
 | 1626 |  | 
| Steven Rostedt | 4a9bd3f | 2011-07-14 16:36:53 -0400 | [diff] [blame] | 1627 | 	if (event) { | 
 | 1628 | 		iter->ent_size = ring_buffer_event_length(event); | 
 | 1629 | 		return ring_buffer_event_data(event); | 
 | 1630 | 	} | 
 | 1631 | 	iter->ent_size = 0; | 
 | 1632 | 	return NULL; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1633 | } | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1634 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1635 | static struct trace_entry * | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1636 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, | 
 | 1637 | 		  unsigned long *missing_events, u64 *ent_ts) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1638 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1639 | 	struct ring_buffer *buffer = iter->tr->buffer; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1640 | 	struct trace_entry *ent, *next = NULL; | 
| Lai Jiangshan | aa27497 | 2010-04-05 17:11:05 +0800 | [diff] [blame] | 1641 | 	unsigned long lost_events = 0, next_lost = 0; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1642 | 	int cpu_file = iter->cpu_file; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1643 | 	u64 next_ts = 0, ts; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1644 | 	int next_cpu = -1; | 
 | 1645 | 	int cpu; | 
 | 1646 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1647 | 	/* | 
 | 1648 | 	 * If we are in a per_cpu trace file, don't bother iterating over | 
 | 1649 | 	 * all CPUs; peek at that one CPU directly. | 
 | 1650 | 	 */ | 
 | 1651 | 	if (cpu_file > TRACE_PIPE_ALL_CPU) { | 
 | 1652 | 		if (ring_buffer_empty_cpu(buffer, cpu_file)) | 
 | 1653 | 			return NULL; | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1654 | 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1655 | 		if (ent_cpu) | 
 | 1656 | 			*ent_cpu = cpu_file; | 
 | 1657 |  | 
 | 1658 | 		return ent; | 
 | 1659 | 	} | 
 | 1660 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 1661 | 	for_each_tracing_cpu(cpu) { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1662 |  | 
 | 1663 | 		if (ring_buffer_empty_cpu(buffer, cpu)) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1664 | 			continue; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1665 |  | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1666 | 		ent = peek_next_entry(iter, cpu, &ts, &lost_events); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1667 |  | 
| Ingo Molnar | cdd31cd | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1668 | 		/* | 
 | 1669 | 		 * Pick the entry with the smallest timestamp: | 
 | 1670 | 		 */ | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1671 | 		if (ent && (!next || ts < next_ts)) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1672 | 			next = ent; | 
 | 1673 | 			next_cpu = cpu; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1674 | 			next_ts = ts; | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1675 | 			next_lost = lost_events; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1676 | 		} | 
 | 1677 | 	} | 
 | 1678 |  | 
 | 1679 | 	if (ent_cpu) | 
 | 1680 | 		*ent_cpu = next_cpu; | 
 | 1681 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1682 | 	if (ent_ts) | 
 | 1683 | 		*ent_ts = next_ts; | 
 | 1684 |  | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1685 | 	if (missing_events) | 
 | 1686 | 		*missing_events = next_lost; | 
 | 1687 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1688 | 	return next; | 
 | 1689 | } | 
 | 1690 |  | 
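/*
 * In other words, __find_next_entry() is a streaming merge by
 * timestamp.  For example (illustrative numbers), if cpu0's next entry
 * has ts=1050 and cpu1's has ts=1032, the cpu1 entry is returned first
 * (with *ent_cpu = 1), and the cpu0 entry is considered again on the
 * next call.
 */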
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1691 | /* Find the next real entry, without updating the iterator itself */ | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1692 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 
 | 1693 | 					  int *ent_cpu, u64 *ent_ts) | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1694 | { | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1695 | 	return __find_next_entry(iter, ent_cpu, NULL, ent_ts); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1696 | } | 
| Ingo Molnar | 8c523a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1697 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1698 | /* Find the next real entry, and increment the iterator to the next entry */ | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1699 | void *trace_find_next_entry_inc(struct trace_iterator *iter) | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1700 | { | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1701 | 	iter->ent = __find_next_entry(iter, &iter->cpu, | 
 | 1702 | 				      &iter->lost_events, &iter->ts); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1703 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1704 | 	if (iter->ent) | 
| Robert Richter | e2ac8ef | 2008-11-12 12:59:32 +0100 | [diff] [blame] | 1705 | 		trace_iterator_increment(iter); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1706 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1707 | 	return iter->ent ? iter : NULL; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1708 | } | 
 | 1709 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1710 | static void trace_consume(struct trace_iterator *iter) | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1711 | { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1712 | 	/* Don't allow ftrace to trace into the ring buffers */ | 
 | 1713 | 	ftrace_disable_cpu(); | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 1714 | 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, | 
 | 1715 | 			    &iter->lost_events); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1716 | 	ftrace_enable_cpu(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1717 | } | 
 | 1718 |  | 
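/*
 * A consuming reader built on these helpers follows a simple loop,
 * sketched below (my_drain_trace() is a hypothetical name; the
 * trace_pipe read path does essentially this):
 *
 *	static void my_drain_trace(struct trace_iterator *iter)
 *	{
 *		while (trace_find_next_entry_inc(iter) != NULL) {
 *			print_trace_line(iter);
 *			trace_consume(iter);
 *		}
 *	}
 *
 * trace_find_next_entry_inc() picks the oldest pending entry across
 * CPUs and trace_consume() removes it from the ring buffer so it is
 * not seen again.
 */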
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1719 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1720 | { | 
 | 1721 | 	struct trace_iterator *iter = m->private; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1722 | 	int i = (int)*pos; | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 1723 | 	void *ent; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1724 |  | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 1725 | 	WARN_ON_ONCE(iter->leftover); | 
 | 1726 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1727 | 	(*pos)++; | 
 | 1728 |  | 
 | 1729 | 	/* can't go backwards */ | 
 | 1730 | 	if (iter->idx > i) | 
 | 1731 | 		return NULL; | 
 | 1732 |  | 
 | 1733 | 	if (iter->idx < 0) | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1734 | 		ent = trace_find_next_entry_inc(iter); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1735 | 	else | 
 | 1736 | 		ent = iter; | 
 | 1737 |  | 
 | 1738 | 	while (ent && iter->idx < i) | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1739 | 		ent = trace_find_next_entry_inc(iter); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1740 |  | 
 | 1741 | 	iter->pos = *pos; | 
 | 1742 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1743 | 	return ent; | 
 | 1744 | } | 
 | 1745 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 1746 | void tracing_iter_reset(struct trace_iterator *iter, int cpu) | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1747 | { | 
 | 1748 | 	struct trace_array *tr = iter->tr; | 
 | 1749 | 	struct ring_buffer_event *event; | 
 | 1750 | 	struct ring_buffer_iter *buf_iter; | 
 | 1751 | 	unsigned long entries = 0; | 
 | 1752 | 	u64 ts; | 
 | 1753 |  | 
 | 1754 | 	tr->data[cpu]->skipped_entries = 0; | 
 | 1755 |  | 
 | 1756 | 	if (!iter->buffer_iter[cpu]) | 
 | 1757 | 		return; | 
 | 1758 |  | 
 | 1759 | 	buf_iter = iter->buffer_iter[cpu]; | 
 | 1760 | 	ring_buffer_iter_reset(buf_iter); | 
 | 1761 |  | 
 | 1762 | 	/* | 
 | 1763 | 	 * With the max latency tracers, it can happen that a reset | 
 | 1764 | 	 * never took place on a cpu. This is evident when the | 
 | 1765 | 	 * timestamp is before the start of the buffer. | 
 | 1766 | 	 */ | 
 | 1767 | 	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { | 
 | 1768 | 		if (ts >= iter->tr->time_start) | 
 | 1769 | 			break; | 
 | 1770 | 		entries++; | 
 | 1771 | 		ring_buffer_read(buf_iter, NULL); | 
 | 1772 | 	} | 
 | 1773 |  | 
 | 1774 | 	tr->data[cpu]->skipped_entries = entries; | 
 | 1775 | } | 
 | 1776 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1777 | /* | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1778 |  * The current tracer is copied to avoid taking a global | 
 | 1779 |  * lock all around. | 
 | 1780 |  */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1781 | static void *s_start(struct seq_file *m, loff_t *pos) | 
 | 1782 | { | 
 | 1783 | 	struct trace_iterator *iter = m->private; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1784 | 	static struct tracer *old_tracer; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1785 | 	int cpu_file = iter->cpu_file; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1786 | 	void *p = NULL; | 
 | 1787 | 	loff_t l = 0; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1788 | 	int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1789 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1790 | 	/* copy the tracer to avoid using a global lock all around */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1791 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1792 | 	if (unlikely(old_tracer != current_trace && current_trace)) { | 
 | 1793 | 		old_tracer = current_trace; | 
 | 1794 | 		*iter->trace = *current_trace; | 
| Steven Rostedt | d15f57f | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 1795 | 	} | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1796 | 	mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1797 |  | 
 | 1798 | 	atomic_inc(&trace_record_cmdline_disabled); | 
 | 1799 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1800 | 	if (*pos != iter->pos) { | 
 | 1801 | 		iter->ent = NULL; | 
 | 1802 | 		iter->cpu = 0; | 
 | 1803 | 		iter->idx = -1; | 
 | 1804 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1805 | 		ftrace_disable_cpu(); | 
 | 1806 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1807 | 		if (cpu_file == TRACE_PIPE_ALL_CPU) { | 
 | 1808 | 			for_each_tracing_cpu(cpu) | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1809 | 				tracing_iter_reset(iter, cpu); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1810 | 		} else | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1811 | 			tracing_iter_reset(iter, cpu_file); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1812 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1813 | 		ftrace_enable_cpu(); | 
 | 1814 |  | 
| Lai Jiangshan | ac91d85 | 2010-03-02 17:54:50 +0800 | [diff] [blame] | 1815 | 		iter->leftover = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1816 | 		for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 
 | 1817 | 			; | 
 | 1818 |  | 
 | 1819 | 	} else { | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 1820 | 		/* | 
 | 1821 | 		 * If we overflowed the seq_file before, then we want | 
 | 1822 | 		 * to just reuse the trace_seq buffer. | 
 | 1823 | 		 */ | 
 | 1824 | 		if (iter->leftover) | 
 | 1825 | 			p = iter; | 
 | 1826 | 		else { | 
 | 1827 | 			l = *pos - 1; | 
 | 1828 | 			p = s_next(m, p, &l); | 
 | 1829 | 		} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1830 | 	} | 
 | 1831 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 1832 | 	trace_event_read_lock(); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 1833 | 	trace_access_lock(cpu_file); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1834 | 	return p; | 
 | 1835 | } | 
 | 1836 |  | 
 | 1837 | static void s_stop(struct seq_file *m, void *p) | 
 | 1838 | { | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 1839 | 	struct trace_iterator *iter = m->private; | 
 | 1840 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1841 | 	atomic_dec(&trace_record_cmdline_disabled); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 1842 | 	trace_access_unlock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 1843 | 	trace_event_read_unlock(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1844 | } | 
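 |  |  | 
 |  | /* | 
 |  |  * s_start() and s_stop() bracket every seq_file read cycle: between | 
 |  |  * them, cmdline recording is suspended and the event read lock plus | 
 |  |  * the per-cpu trace_access lock are held, so each s_show() call sees | 
 |  |  * a consistent view of the ring buffer. | 
 |  |  */ | 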
 | 1845 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1846 | static void print_lat_help_header(struct seq_file *m) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1847 | { | 
| Michael Ellerman | a616835 | 2008-08-20 16:36:11 -0700 | [diff] [blame] | 1848 | 	seq_puts(m, "#                  _------=> CPU#            \n"); | 
 | 1849 | 	seq_puts(m, "#                 / _-----=> irqs-off        \n"); | 
 | 1850 | 	seq_puts(m, "#                | / _----=> need-resched    \n"); | 
 | 1851 | 	seq_puts(m, "#                || / _---=> hardirq/softirq \n"); | 
 | 1852 | 	seq_puts(m, "#                ||| / _--=> preempt-depth   \n"); | 
| Steven Rostedt | e6e1e25 | 2011-03-09 10:41:56 -0500 | [diff] [blame] | 1853 | 	seq_puts(m, "#                |||| /     delay             \n"); | 
 | 1854 | 	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n"); | 
 | 1855 | 	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1856 | } | 
 | 1857 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1858 | static void print_func_help_header(struct seq_file *m) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1859 | { | 
| Michael Ellerman | a616835 | 2008-08-20 16:36:11 -0700 | [diff] [blame] | 1860 | 	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n"); | 
 | 1861 | 	seq_puts(m, "#              | |       |          |         |\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1862 | } | 
 | 1863 |  | 
 | 1864 |  | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 1865 | void | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1866 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) | 
 | 1867 | { | 
 | 1868 | 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 
 | 1869 | 	struct trace_array *tr = iter->tr; | 
 | 1870 | 	struct trace_array_cpu *data = tr->data[tr->cpu]; | 
 | 1871 | 	struct tracer *type = current_trace; | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1872 | 	unsigned long entries = 0; | 
 | 1873 | 	unsigned long total = 0; | 
 | 1874 | 	unsigned long count; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1875 | 	const char *name = "preemption"; | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1876 | 	int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1877 |  | 
 | 1878 | 	if (type) | 
 | 1879 | 		name = type->name; | 
 | 1880 |  | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1881 |  | 
 | 1882 | 	for_each_tracing_cpu(cpu) { | 
 | 1883 | 		count = ring_buffer_entries_cpu(tr->buffer, cpu); | 
 | 1884 | 		/* | 
 | 1885 | 		 * If this buffer has skipped entries, then it holds | 
 | 1886 | 		 * every entry for the trace, and we need to ignore | 
 | 1887 | 		 * the ones before the time stamp. | 
 | 1888 | 		 */ | 
 | 1889 | 		if (tr->data[cpu]->skipped_entries) { | 
 | 1890 | 			count -= tr->data[cpu]->skipped_entries; | 
 | 1891 | 			/* total is the same as the entries */ | 
 | 1892 | 			total += count; | 
 | 1893 | 		} else | 
 | 1894 | 			total += count + | 
 | 1895 | 				ring_buffer_overrun_cpu(tr->buffer, cpu); | 
 | 1896 | 		entries += count; | 
 | 1897 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1898 |  | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1899 | 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1900 | 		   name, UTS_RELEASE); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1901 | 	seq_puts(m, "# -----------------------------------" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1902 | 		 "---------------------------------\n"); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1903 | 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1904 | 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d", | 
| Steven Rostedt | 57f50be | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 1905 | 		   nsecs_to_usecs(data->saved_latency), | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1906 | 		   entries, | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 1907 | 		   total, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1908 | 		   tr->cpu, | 
 | 1909 | #if defined(CONFIG_PREEMPT_NONE) | 
 | 1910 | 		   "server", | 
 | 1911 | #elif defined(CONFIG_PREEMPT_VOLUNTARY) | 
 | 1912 | 		   "desktop", | 
| Steven Rostedt | b5c21b4 | 2008-07-10 20:58:12 -0400 | [diff] [blame] | 1913 | #elif defined(CONFIG_PREEMPT) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1914 | 		   "preempt", | 
 | 1915 | #else | 
 | 1916 | 		   "unknown", | 
 | 1917 | #endif | 
 | 1918 | 		   /* These are reserved for later use */ | 
 | 1919 | 		   0, 0, 0, 0); | 
 | 1920 | #ifdef CONFIG_SMP | 
 | 1921 | 	seq_printf(m, " #P:%d)\n", num_online_cpus()); | 
 | 1922 | #else | 
 | 1923 | 	seq_puts(m, ")\n"); | 
 | 1924 | #endif | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1925 | 	seq_puts(m, "#    -----------------\n"); | 
 | 1926 | 	seq_printf(m, "#    | task: %.16s-%d " | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1927 | 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", | 
 | 1928 | 		   data->comm, data->pid, data->uid, data->nice, | 
 | 1929 | 		   data->policy, data->rt_priority); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1930 | 	seq_puts(m, "#    -----------------\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1931 |  | 
 | 1932 | 	if (data->critical_start) { | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1933 | 		seq_puts(m, "#  => started at: "); | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1934 | 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); | 
 | 1935 | 		trace_print_seq(m, &iter->seq); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1936 | 		seq_puts(m, "\n#  => ended at:   "); | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1937 | 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 
 | 1938 | 		trace_print_seq(m, &iter->seq); | 
| Steven Rostedt | 8248ac0 | 2009-09-02 12:27:41 -0400 | [diff] [blame] | 1939 | 		seq_puts(m, "\n#\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1940 | 	} | 
 | 1941 |  | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1942 | 	seq_puts(m, "#\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1943 | } | 
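 |  |  | 
 |  | /* | 
 |  |  * For illustration, with the irqsoff tracer the header built above | 
 |  |  * comes out roughly like this (all values made up): | 
 |  |  * | 
 |  |  * # irqsoff latency trace v1.1.5 on 2.6.39 | 
 |  |  * # -------------------------------------------------------------------- | 
 |  |  * # latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4) | 
 |  |  * #    ----------------- | 
 |  |  * #    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0) | 
 |  |  * #    ----------------- | 
 |  |  * #  => started at: __lock_task_sighand | 
 |  |  * #  => ended at:   _raw_spin_unlock_irqrestore | 
 |  |  */ | 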
 | 1944 |  | 
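 |  | /* | 
 |  |  * Print a "buffer started" annotation the first time output moves to | 
 |  |  * a given CPU. TRACE_FILE_ANNOTATE is only set (in __tracing_open()) | 
 |  |  * when the ring buffer reported overruns, i.e. when events may have | 
 |  |  * been dropped. | 
 |  |  */ | 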
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1945 | static void test_cpu_buff_start(struct trace_iterator *iter) | 
 | 1946 | { | 
 | 1947 | 	struct trace_seq *s = &iter->seq; | 
 | 1948 |  | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 1949 | 	if (!(trace_flags & TRACE_ITER_ANNOTATE)) | 
 | 1950 | 		return; | 
 | 1951 |  | 
 | 1952 | 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | 
 | 1953 | 		return; | 
 | 1954 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 1955 | 	if (cpumask_test_cpu(iter->cpu, iter->started)) | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1956 | 		return; | 
 | 1957 |  | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 1958 | 	if (iter->tr->data[iter->cpu]->skipped_entries) | 
 | 1959 | 		return; | 
 | 1960 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 1961 | 	cpumask_set_cpu(iter->cpu, iter->started); | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 1962 |  | 
 | 1963 | 	/* Don't print the "buffer started" annotation for the first entry of the trace */ | 
 | 1964 | 	if (iter->idx > 1) | 
 | 1965 | 		trace_seq_printf(s, "##### CPU %u buffer started ####\n", | 
 | 1966 | 				iter->cpu); | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1967 | } | 
 | 1968 |  | 
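 |  | /* | 
 |  |  * The four formatters below (default, raw, hex, binary) each emit the | 
 |  |  * optional pid/CPU/timestamp context and then hand the payload off to | 
 |  |  * the matching callback in the event's registered funcs. | 
 |  |  */ | 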
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1969 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1970 | { | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1971 | 	struct trace_seq *s = &iter->seq; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1972 | 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 1973 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1974 | 	struct trace_event *event; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1975 |  | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 1976 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1977 |  | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1978 | 	test_cpu_buff_start(iter); | 
 | 1979 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1980 | 	event = ftrace_find_event(entry->type); | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1981 |  | 
 | 1982 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| Steven Rostedt | 27d48be | 2009-03-04 21:57:29 -0500 | [diff] [blame] | 1983 | 		if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 
 | 1984 | 			if (!trace_print_lat_context(iter)) | 
 | 1985 | 				goto partial; | 
 | 1986 | 		} else { | 
 | 1987 | 			if (!trace_print_context(iter)) | 
 | 1988 | 				goto partial; | 
 | 1989 | 		} | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1990 | 	} | 
 | 1991 |  | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 1992 | 	if (event) | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 1993 | 		return event->funcs->trace(iter, sym_flags, event); | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1994 |  | 
 | 1995 | 	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 
 | 1996 | 		goto partial; | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 1997 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1998 | 	return TRACE_TYPE_HANDLED; | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1999 | partial: | 
 | 2000 | 	return TRACE_TYPE_PARTIAL_LINE; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2001 | } | 
 | 2002 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2003 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2004 | { | 
 | 2005 | 	struct trace_seq *s = &iter->seq; | 
 | 2006 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2007 | 	struct trace_event *event; | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2008 |  | 
 | 2009 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 2010 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2011 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2012 | 		if (!trace_seq_printf(s, "%d %d %llu ", | 
 | 2013 | 				      entry->pid, iter->cpu, iter->ts)) | 
 | 2014 | 			goto partial; | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2015 | 	} | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2016 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2017 | 	event = ftrace_find_event(entry->type); | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 2018 | 	if (event) | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 2019 | 		return event->funcs->raw(iter, 0, event); | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2020 |  | 
 | 2021 | 	if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 
 | 2022 | 		goto partial; | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 2023 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2024 | 	return TRACE_TYPE_HANDLED; | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2025 | partial: | 
 | 2026 | 	return TRACE_TYPE_PARTIAL_LINE; | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2027 | } | 
 | 2028 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2029 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2030 | { | 
 | 2031 | 	struct trace_seq *s = &iter->seq; | 
 | 2032 | 	unsigned char newline = '\n'; | 
 | 2033 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2034 | 	struct trace_event *event; | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2035 |  | 
 | 2036 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 2037 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2038 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
 | 2039 | 		SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 
 | 2040 | 		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 
 | 2041 | 		SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 
 | 2042 | 	} | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2043 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2044 | 	event = ftrace_find_event(entry->type); | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 2045 | 	if (event) { | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 2046 | 		enum print_line_t ret = event->funcs->hex(iter, 0, event); | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 2047 | 		if (ret != TRACE_TYPE_HANDLED) | 
 | 2048 | 			return ret; | 
 | 2049 | 	} | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 2050 |  | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2051 | 	SEQ_PUT_FIELD_RET(s, newline); | 
 | 2052 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2053 | 	return TRACE_TYPE_HANDLED; | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2054 | } | 
 | 2055 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2056 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2057 | { | 
 | 2058 | 	struct trace_seq *s = &iter->seq; | 
 | 2059 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2060 | 	struct trace_event *event; | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2061 |  | 
 | 2062 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 2063 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2064 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
 | 2065 | 		SEQ_PUT_FIELD_RET(s, entry->pid); | 
| Steven Rostedt | 1830b52 | 2009-02-07 19:38:43 -0500 | [diff] [blame] | 2066 | 		SEQ_PUT_FIELD_RET(s, iter->cpu); | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 2067 | 		SEQ_PUT_FIELD_RET(s, iter->ts); | 
 | 2068 | 	} | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2069 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 2070 | 	event = ftrace_find_event(entry->type); | 
| Steven Rostedt | a9a5776 | 2010-04-22 18:46:14 -0400 | [diff] [blame] | 2071 | 	return event ? event->funcs->binary(iter, 0, event) : | 
 | 2072 | 		TRACE_TYPE_HANDLED; | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2073 | } | 
 | 2074 |  | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2075 | int trace_empty(struct trace_iterator *iter) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2076 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2077 | 	int cpu; | 
 | 2078 |  | 
| Steven Rostedt | 9aba60f | 2009-03-11 19:52:30 -0400 | [diff] [blame] | 2079 | 	/* If we are looking at one CPU buffer, only check that one */ | 
 | 2080 | 	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { | 
 | 2081 | 		cpu = iter->cpu_file; | 
 | 2082 | 		if (iter->buffer_iter[cpu]) { | 
 | 2083 | 			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 
 | 2084 | 				return 0; | 
 | 2085 | 		} else { | 
 | 2086 | 			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 
 | 2087 | 				return 0; | 
 | 2088 | 		} | 
 | 2089 | 		return 1; | 
 | 2090 | 	} | 
 | 2091 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2092 | 	for_each_tracing_cpu(cpu) { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2093 | 		if (iter->buffer_iter[cpu]) { | 
 | 2094 | 			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 
 | 2095 | 				return 0; | 
 | 2096 | 		} else { | 
 | 2097 | 			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 
 | 2098 | 				return 0; | 
 | 2099 | 		} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2100 | 	} | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2101 |  | 
| Frederic Weisbecker | 797d371 | 2008-09-30 18:13:45 +0200 | [diff] [blame] | 2102 | 	return 1; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2103 | } | 
 | 2104 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 2105 | /*  Called with trace_event_read_lock() held. */ | 
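 |  | /* | 
 |  |  * Format precedence: a lost-events note is emitted first, then the | 
 |  |  * tracer's own print_line() callback gets a chance, then the | 
 |  |  * printk msg-only shortcuts, and finally one of the bin/hex/raw/ | 
 |  |  * default formatters is selected from the global trace_flags. | 
 |  |  */ | 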
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 2106 | enum print_line_t print_trace_line(struct trace_iterator *iter) | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2107 | { | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2108 | 	enum print_line_t ret; | 
 | 2109 |  | 
| Jiri Olsa | ee5e51f | 2011-03-25 12:05:18 +0100 | [diff] [blame] | 2110 | 	if (iter->lost_events && | 
 | 2111 | 	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | 
 | 2112 | 				 iter->cpu, iter->lost_events)) | 
 | 2113 | 		return TRACE_TYPE_PARTIAL_LINE; | 
| Steven Rostedt | bc21b47 | 2010-03-31 19:49:26 -0400 | [diff] [blame] | 2114 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2115 | 	if (iter->trace && iter->trace->print_line) { | 
 | 2116 | 		ret = iter->trace->print_line(iter); | 
 | 2117 | 		if (ret != TRACE_TYPE_UNHANDLED) | 
 | 2118 | 			return ret; | 
 | 2119 | 	} | 
| Thomas Gleixner | 72829bc | 2008-05-23 21:37:28 +0200 | [diff] [blame] | 2120 |  | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 2121 | 	if (iter->ent->type == TRACE_BPRINT && | 
 | 2122 | 			trace_flags & TRACE_ITER_PRINTK && | 
 | 2123 | 			trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 
| Steven Rostedt | 5ef841f | 2009-03-19 12:20:38 -0400 | [diff] [blame] | 2124 | 		return trace_print_bprintk_msg_only(iter); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 2125 |  | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 2126 | 	if (iter->ent->type == TRACE_PRINT && | 
 | 2127 | 			trace_flags & TRACE_ITER_PRINTK && | 
 | 2128 | 			trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 
| Steven Rostedt | 5ef841f | 2009-03-19 12:20:38 -0400 | [diff] [blame] | 2129 | 		return trace_print_printk_msg_only(iter); | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 2130 |  | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2131 | 	if (trace_flags & TRACE_ITER_BIN) | 
 | 2132 | 		return print_bin_fmt(iter); | 
 | 2133 |  | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2134 | 	if (trace_flags & TRACE_ITER_HEX) | 
 | 2135 | 		return print_hex_fmt(iter); | 
 | 2136 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2137 | 	if (trace_flags & TRACE_ITER_RAW) | 
 | 2138 | 		return print_raw_fmt(iter); | 
 | 2139 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2140 | 	return print_trace_fmt(iter); | 
 | 2141 | } | 
 | 2142 |  | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2143 | void trace_default_header(struct seq_file *m) | 
 | 2144 | { | 
 | 2145 | 	struct trace_iterator *iter = m->private; | 
 | 2146 |  | 
| Jiri Olsa | f56e7f8 | 2011-06-03 16:58:49 +0200 | [diff] [blame] | 2147 | 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 
 | 2148 | 		return; | 
 | 2149 |  | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2150 | 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 
 | 2151 | 		/* print nothing if the buffers are empty */ | 
 | 2152 | 		if (trace_empty(iter)) | 
 | 2153 | 			return; | 
 | 2154 | 		print_trace_header(m, iter); | 
 | 2155 | 		if (!(trace_flags & TRACE_ITER_VERBOSE)) | 
 | 2156 | 			print_lat_help_header(m); | 
 | 2157 | 	} else { | 
 | 2158 | 		if (!(trace_flags & TRACE_ITER_VERBOSE)) | 
 | 2159 | 			print_func_help_header(m); | 
 | 2160 | 	} | 
 | 2161 | } | 
 | 2162 |  | 
| Steven Rostedt | e0a413f | 2011-09-29 21:26:16 -0400 | [diff] [blame] | 2163 | static void test_ftrace_alive(struct seq_file *m) | 
 | 2164 | { | 
 | 2165 | 	if (!ftrace_is_dead()) | 
 | 2166 | 		return; | 
 | 2167 | 	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); | 
 | 2168 | 	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n"); | 
 | 2169 | } | 
 | 2170 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2171 | static int s_show(struct seq_file *m, void *v) | 
 | 2172 | { | 
 | 2173 | 	struct trace_iterator *iter = v; | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 2174 | 	int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2175 |  | 
 | 2176 | 	if (iter->ent == NULL) { | 
 | 2177 | 		if (iter->tr) { | 
 | 2178 | 			seq_printf(m, "# tracer: %s\n", iter->trace->name); | 
 | 2179 | 			seq_puts(m, "#\n"); | 
| Steven Rostedt | e0a413f | 2011-09-29 21:26:16 -0400 | [diff] [blame] | 2180 | 			test_ftrace_alive(m); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2181 | 		} | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 2182 | 		if (iter->trace && iter->trace->print_header) | 
 | 2183 | 			iter->trace->print_header(m); | 
| Jiri Olsa | 62b915f | 2010-04-02 19:01:22 +0200 | [diff] [blame] | 2184 | 		else | 
 | 2185 | 			trace_default_header(m); | 
 | 2186 |  | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 2187 | 	} else if (iter->leftover) { | 
 | 2188 | 		/* | 
 | 2189 | 		 * If we filled the seq_file buffer earlier, we | 
 | 2190 | 		 * want to just show it now. | 
 | 2191 | 		 */ | 
 | 2192 | 		ret = trace_print_seq(m, &iter->seq); | 
 | 2193 |  | 
 | 2194 | 		/* ret should this time be zero, but you never know */ | 
 | 2195 | 		iter->leftover = ret; | 
 | 2196 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2197 | 	} else { | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2198 | 		print_trace_line(iter); | 
| Steven Rostedt | a63ce5b | 2009-12-07 09:11:39 -0500 | [diff] [blame] | 2199 | 		ret = trace_print_seq(m, &iter->seq); | 
 | 2200 | 		/* | 
 | 2201 | 		 * If we overflow the seq_file buffer, then it will | 
 | 2202 | 		 * ask us for this data again at the next s_start(). | 
 | 2203 | 		 * Use that instead. | 
 | 2204 | 		 *  ret is 0 if seq_file write succeeded. | 
 | 2205 | 		 *        -1 otherwise. | 
 | 2206 | 		 */ | 
 | 2207 | 		iter->leftover = ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2208 | 	} | 
 | 2209 |  | 
 | 2210 | 	return 0; | 
 | 2211 | } | 
 | 2212 |  | 
| James Morris | 88e9d34 | 2009-09-22 16:43:43 -0700 | [diff] [blame] | 2213 | static const struct seq_operations tracer_seq_ops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2214 | 	.start		= s_start, | 
 | 2215 | 	.next		= s_next, | 
 | 2216 | 	.stop		= s_stop, | 
 | 2217 | 	.show		= s_show, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2218 | }; | 
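 |  |  | 
 |  | /* | 
 |  |  * A simplified sketch of how seq_read() in fs/seq_file.c drives the | 
 |  |  * ops above (the real loop also handles buffer overflow and partial | 
 |  |  * reads, which is what the iter->leftover logic caters to): | 
 |  |  * | 
 |  |  *	p = s_start(m, &pos); | 
 |  |  *	while (p) { | 
 |  |  *		s_show(m, p); | 
 |  |  *		p = s_next(m, p, &pos); | 
 |  |  *	} | 
 |  |  *	s_stop(m, p); | 
 |  |  */ | 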
 | 2219 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 2220 | static struct trace_iterator * | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2221 | __tracing_open(struct inode *inode, struct file *file) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2222 | { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2223 | 	long cpu_file = (long) inode->i_private; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2224 | 	void *fail_ret = ERR_PTR(-ENOMEM); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2225 | 	struct trace_iterator *iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2226 | 	struct seq_file *m; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2227 | 	int cpu, ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2228 |  | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2229 | 	if (tracing_disabled) | 
 | 2230 | 		return ERR_PTR(-ENODEV); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2231 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2232 | 	iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2233 | 	if (!iter) | 
 | 2234 | 		return ERR_PTR(-ENOMEM); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2235 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2236 | 	/* | 
 | 2237 | 	 * We make a copy of the current tracer to avoid concurrent | 
 | 2238 | 	 * changes to it while we are reading. | 
 | 2239 | 	 */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2240 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2241 | 	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2242 | 	if (!iter->trace) | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2243 | 		goto fail; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2244 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2245 | 	if (current_trace) | 
 | 2246 | 		*iter->trace = *current_trace; | 
 | 2247 |  | 
| Li Zefan | 79f5599 | 2009-06-15 14:58:26 +0800 | [diff] [blame] | 2248 | 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 2249 | 		goto fail; | 
 | 2250 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2251 | 	if (current_trace && current_trace->print_max) | 
 | 2252 | 		iter->tr = &max_tr; | 
 | 2253 | 	else | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2254 | 		iter->tr = &global_trace; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2255 | 	iter->pos = -1; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2256 | 	mutex_init(&iter->mutex); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2257 | 	iter->cpu_file = cpu_file; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2258 |  | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 2259 | 	/* Notify the tracer early, before we stop tracing. */ | 
 | 2260 | 	if (iter->trace && iter->trace->open) | 
| Markus Metzger | a93751c | 2008-12-11 13:53:26 +0100 | [diff] [blame] | 2261 | 		iter->trace->open(iter); | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 2262 |  | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 2263 | 	/* Annotate start of buffers if we had overruns */ | 
 | 2264 | 	if (ring_buffer_overruns(iter->tr->buffer)) | 
 | 2265 | 		iter->iter_flags |= TRACE_FILE_ANNOTATE; | 
 | 2266 |  | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2267 | 	/* stop the trace while dumping */ | 
 | 2268 | 	tracing_stop(); | 
 | 2269 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2270 | 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 
 | 2271 | 		for_each_tracing_cpu(cpu) { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2272 | 			iter->buffer_iter[cpu] = | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 2273 | 				ring_buffer_read_prepare(iter->tr->buffer, cpu); | 
 | 2274 | 		} | 
 | 2275 | 		ring_buffer_read_prepare_sync(); | 
 | 2276 | 		for_each_tracing_cpu(cpu) { | 
 | 2277 | 			ring_buffer_read_start(iter->buffer_iter[cpu]); | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2278 | 			tracing_iter_reset(iter, cpu); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2279 | 		} | 
 | 2280 | 	} else { | 
 | 2281 | 		cpu = iter->cpu_file; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2282 | 		iter->buffer_iter[cpu] = | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 2283 | 			ring_buffer_read_prepare(iter->tr->buffer, cpu); | 
 | 2284 | 		ring_buffer_read_prepare_sync(); | 
 | 2285 | 		ring_buffer_read_start(iter->buffer_iter[cpu]); | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2286 | 		tracing_iter_reset(iter, cpu); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2287 | 	} | 
 | 2288 |  | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2289 | 	ret = seq_open(file, &tracer_seq_ops); | 
 | 2290 | 	if (ret < 0) { | 
 | 2291 | 		fail_ret = ERR_PTR(ret); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2292 | 		goto fail_buffer; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2293 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2294 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2295 | 	m = file->private_data; | 
 | 2296 | 	m->private = iter; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2297 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2298 | 	mutex_unlock(&trace_types_lock); | 
 | 2299 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2300 | 	return iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2301 |  | 
 | 2302 |  fail_buffer: | 
 | 2303 | 	for_each_tracing_cpu(cpu) { | 
 | 2304 | 		if (iter->buffer_iter[cpu]) | 
 | 2305 | 			ring_buffer_read_finish(iter->buffer_iter[cpu]); | 
 | 2306 | 	} | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 2307 | 	free_cpumask_var(iter->started); | 
| Steven Rostedt | 2f26ebd | 2009-09-01 11:06:29 -0400 | [diff] [blame] | 2308 | 	tracing_start(); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2309 |  fail: | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2310 | 	mutex_unlock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2311 | 	kfree(iter->trace); | 
| Julia Lawall | 0bb943c | 2008-11-14 19:05:31 +0100 | [diff] [blame] | 2312 | 	kfree(iter); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2313 |  | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2314 | 	return fail_ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2315 | } | 
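 |  |  | 
 |  | /* | 
 |  |  * Note the read_prepare()/read_prepare_sync()/read_start() split | 
 |  |  * above: it lets the expensive synchronization needed to start a | 
 |  |  * non-consuming read happen once for all CPUs rather than once per | 
 |  |  * CPU buffer. | 
 |  |  */ | 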
 | 2316 |  | 
 | 2317 | int tracing_open_generic(struct inode *inode, struct file *filp) | 
 | 2318 | { | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2319 | 	if (tracing_disabled) | 
 | 2320 | 		return -ENODEV; | 
 | 2321 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2322 | 	filp->private_data = inode->i_private; | 
 | 2323 | 	return 0; | 
 | 2324 | } | 
 | 2325 |  | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 2326 | static int tracing_release(struct inode *inode, struct file *file) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2327 | { | 
| matt mooney | 907f278 | 2010-09-27 19:04:53 -0700 | [diff] [blame] | 2328 | 	struct seq_file *m = file->private_data; | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2329 | 	struct trace_iterator *iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2330 | 	int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2331 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2332 | 	if (!(file->f_mode & FMODE_READ)) | 
 | 2333 | 		return 0; | 
 | 2334 |  | 
 | 2335 | 	iter = m->private; | 
 | 2336 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2337 | 	mutex_lock(&trace_types_lock); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 2338 | 	for_each_tracing_cpu(cpu) { | 
 | 2339 | 		if (iter->buffer_iter[cpu]) | 
 | 2340 | 			ring_buffer_read_finish(iter->buffer_iter[cpu]); | 
 | 2341 | 	} | 
 | 2342 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2343 | 	if (iter->trace && iter->trace->close) | 
 | 2344 | 		iter->trace->close(iter); | 
 | 2345 |  | 
 | 2346 | 	/* reenable tracing if it was previously enabled */ | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2347 | 	tracing_start(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2348 | 	mutex_unlock(&trace_types_lock); | 
 | 2349 |  | 
 | 2350 | 	seq_release(inode, file); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2351 | 	mutex_destroy(&iter->mutex); | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 2352 | 	free_cpumask_var(iter->started); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2353 | 	kfree(iter->trace); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2354 | 	kfree(iter); | 
 | 2355 | 	return 0; | 
 | 2356 | } | 
 | 2357 |  | 
 | 2358 | static int tracing_open(struct inode *inode, struct file *file) | 
 | 2359 | { | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 2360 | 	struct trace_iterator *iter; | 
 | 2361 | 	int ret = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2362 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2363 | 	/* If this file was opened for write, then erase its contents */ | 
 | 2364 | 	if ((file->f_mode & FMODE_WRITE) && | 
| Steven Rostedt | 8650ae3 | 2009-07-22 23:29:30 -0400 | [diff] [blame] | 2365 | 	    (file->f_flags & O_TRUNC)) { | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2366 | 		long cpu = (long) inode->i_private; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2367 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2368 | 		if (cpu == TRACE_PIPE_ALL_CPU) | 
 | 2369 | 			tracing_reset_online_cpus(&global_trace); | 
 | 2370 | 		else | 
 | 2371 | 			tracing_reset(&global_trace, cpu); | 
 | 2372 | 	} | 
 | 2373 |  | 
 | 2374 | 	if (file->f_mode & FMODE_READ) { | 
 | 2375 | 		iter = __tracing_open(inode, file); | 
 | 2376 | 		if (IS_ERR(iter)) | 
 | 2377 | 			ret = PTR_ERR(iter); | 
 | 2378 | 		else if (trace_flags & TRACE_ITER_LATENCY_FMT) | 
 | 2379 | 			iter->iter_flags |= TRACE_FILE_LAT_FMT; | 
 | 2380 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2381 | 	return ret; | 
 | 2382 | } | 
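 |  |  | 
 |  | /* | 
 |  |  * Opening the trace file for write with O_TRUNC is how userspace | 
 |  |  * clears the buffers, e.g. (assuming debugfs is mounted in the usual | 
 |  |  * place): | 
 |  |  * | 
 |  |  *	echo > /sys/kernel/debug/tracing/trace | 
 |  |  */ | 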
 | 2383 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 2384 | static void * | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2385 | t_next(struct seq_file *m, void *v, loff_t *pos) | 
 | 2386 | { | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2387 | 	struct tracer *t = v; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2388 |  | 
 | 2389 | 	(*pos)++; | 
 | 2390 |  | 
 | 2391 | 	if (t) | 
 | 2392 | 		t = t->next; | 
 | 2393 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2394 | 	return t; | 
 | 2395 | } | 
 | 2396 |  | 
 | 2397 | static void *t_start(struct seq_file *m, loff_t *pos) | 
 | 2398 | { | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2399 | 	struct tracer *t; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2400 | 	loff_t l = 0; | 
 | 2401 |  | 
 | 2402 | 	mutex_lock(&trace_types_lock); | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2403 | 	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2404 | 		; | 
 | 2405 |  | 
 | 2406 | 	return t; | 
 | 2407 | } | 
 | 2408 |  | 
 | 2409 | static void t_stop(struct seq_file *m, void *p) | 
 | 2410 | { | 
 | 2411 | 	mutex_unlock(&trace_types_lock); | 
 | 2412 | } | 
 | 2413 |  | 
 | 2414 | static int t_show(struct seq_file *m, void *v) | 
 | 2415 | { | 
 | 2416 | 	struct tracer *t = v; | 
 | 2417 |  | 
 | 2418 | 	if (!t) | 
 | 2419 | 		return 0; | 
 | 2420 |  | 
 | 2421 | 	seq_printf(m, "%s", t->name); | 
 | 2422 | 	if (t->next) | 
 | 2423 | 		seq_putc(m, ' '); | 
 | 2424 | 	else | 
 | 2425 | 		seq_putc(m, '\n'); | 
 | 2426 |  | 
 | 2427 | 	return 0; | 
 | 2428 | } | 
 | 2429 |  | 
| James Morris | 88e9d34 | 2009-09-22 16:43:43 -0700 | [diff] [blame] | 2430 | static const struct seq_operations show_traces_seq_ops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2431 | 	.start		= t_start, | 
 | 2432 | 	.next		= t_next, | 
 | 2433 | 	.stop		= t_stop, | 
 | 2434 | 	.show		= t_show, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2435 | }; | 
 | 2436 |  | 
 | 2437 | static int show_traces_open(struct inode *inode, struct file *file) | 
 | 2438 | { | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2439 | 	if (tracing_disabled) | 
 | 2440 | 		return -ENODEV; | 
 | 2441 |  | 
| Li Zefan | f129e96 | 2009-06-24 09:53:44 +0800 | [diff] [blame] | 2442 | 	return seq_open(file, &show_traces_seq_ops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2443 | } | 
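 |  |  | 
 |  | /* | 
 |  |  * The seq ops above back the "available_tracers" file: t_show() emits | 
 |  |  * each registered tracer's name, space separated, on a single line. | 
 |  |  */ | 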
 | 2444 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2445 | static ssize_t | 
 | 2446 | tracing_write_stub(struct file *filp, const char __user *ubuf, | 
 | 2447 | 		   size_t count, loff_t *ppos) | 
 | 2448 | { | 
 | 2449 | 	return count; | 
 | 2450 | } | 
 | 2451 |  | 
| Slava Pestov | 364829b | 2010-11-24 15:13:16 -0800 | [diff] [blame] | 2452 | static loff_t tracing_seek(struct file *file, loff_t offset, int origin) | 
 | 2453 | { | 
 | 2454 | 	if (file->f_mode & FMODE_READ) | 
 | 2455 | 		return seq_lseek(file, offset, origin); | 
 | 2456 | 	else | 
 | 2457 | 		return 0; | 
 | 2458 | } | 
 | 2459 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2460 | static const struct file_operations tracing_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2461 | 	.open		= tracing_open, | 
 | 2462 | 	.read		= seq_read, | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 2463 | 	.write		= tracing_write_stub, | 
| Slava Pestov | 364829b | 2010-11-24 15:13:16 -0800 | [diff] [blame] | 2464 | 	.llseek		= tracing_seek, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2465 | 	.release	= tracing_release, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2466 | }; | 
 | 2467 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2468 | static const struct file_operations show_traces_fops = { | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2469 | 	.open		= show_traces_open, | 
 | 2470 | 	.read		= seq_read, | 
 | 2471 | 	.release	= seq_release, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 2472 | 	.llseek		= seq_lseek, | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2473 | }; | 
 | 2474 |  | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2475 | /* | 
 | 2476 |  * Only trace on a CPU if the bitmask is set: | 
 | 2477 |  */ | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2478 | static cpumask_var_t tracing_cpumask; | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2479 |  | 
 | 2480 | /* | 
 | 2481 |  * The tracer itself will not take this lock, but we still want | 
 | 2482 |  * to provide a consistent cpumask to user-space: | 
 | 2483 |  */ | 
 | 2484 | static DEFINE_MUTEX(tracing_cpumask_update_lock); | 
 | 2485 |  | 
 | 2486 | /* | 
 | 2487 |  * Temporary storage for the character representation of the | 
 | 2488 |  * CPU bitmask (and one more byte for the newline): | 
 | 2489 |  */ | 
 | 2490 | static char mask_str[NR_CPUS + 1]; | 
 | 2491 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2492 | static ssize_t | 
 | 2493 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | 
 | 2494 | 		     size_t count, loff_t *ppos) | 
 | 2495 | { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2496 | 	int len; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2497 |  | 
 | 2498 | 	mutex_lock(&tracing_cpumask_update_lock); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2499 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2500 | 	len = cpumask_scnprintf(mask_str, count, tracing_cpumask); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2501 | 	if (count - len < 2) { | 
 | 2502 | 		count = -EINVAL; | 
 | 2503 | 		goto out_err; | 
 | 2504 | 	} | 
 | 2505 | 	len += sprintf(mask_str + len, "\n"); | 
 | 2506 | 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); | 
 | 2507 |  | 
 | 2508 | out_err: | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2509 | 	mutex_unlock(&tracing_cpumask_update_lock); | 
 | 2510 |  | 
 | 2511 | 	return count; | 
 | 2512 | } | 
 | 2513 |  | 
 | 2514 | static ssize_t | 
 | 2515 | tracing_cpumask_write(struct file *filp, const char __user *ubuf, | 
 | 2516 | 		      size_t count, loff_t *ppos) | 
 | 2517 | { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2518 | 	int err, cpu; | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2519 | 	cpumask_var_t tracing_cpumask_new; | 
 | 2520 |  | 
 | 2521 | 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) | 
 | 2522 | 		return -ENOMEM; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2523 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2524 | 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2525 | 	if (err) | 
 | 2526 | 		goto err_unlock; | 
 | 2527 |  | 
| Li Zefan | 215368e | 2009-06-15 10:56:42 +0800 | [diff] [blame] | 2528 | 	mutex_lock(&tracing_cpumask_update_lock); | 
 | 2529 |  | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 2530 | 	local_irq_disable(); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 2531 | 	arch_spin_lock(&ftrace_max_lock); | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2532 | 	for_each_tracing_cpu(cpu) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2533 | 		/* | 
 | 2534 | 		 * Increase/decrease the disabled counter if we are | 
 | 2535 | 		 * about to flip a bit in the cpumask: | 
 | 2536 | 		 */ | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2537 | 		if (cpumask_test_cpu(cpu, tracing_cpumask) && | 
 | 2538 | 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2539 | 			atomic_inc(&global_trace.data[cpu]->disabled); | 
 | 2540 | 		} | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2541 | 		if (!cpumask_test_cpu(cpu, tracing_cpumask) && | 
 | 2542 | 				cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2543 | 			atomic_dec(&global_trace.data[cpu]->disabled); | 
 | 2544 | 		} | 
 | 2545 | 	} | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 2546 | 	arch_spin_unlock(&ftrace_max_lock); | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 2547 | 	local_irq_enable(); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2548 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2549 | 	cpumask_copy(tracing_cpumask, tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2550 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2551 | 	mutex_unlock(&tracing_cpumask_update_lock); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2552 | 	free_cpumask_var(tracing_cpumask_new); | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2553 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2554 | 	return count; | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2555 |  | 
 | 2556 | err_unlock: | 
| Li Zefan | 215368e | 2009-06-15 10:56:42 +0800 | [diff] [blame] | 2557 | 	free_cpumask_var(tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2558 |  | 
 | 2559 | 	return err; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2560 | } | 
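 |  |  | 
 |  | /* | 
 |  |  * Writing a hex mask selects which CPUs record events; the disabled | 
 |  |  * counters are flipped under ftrace_max_lock with interrupts off so | 
 |  |  * the switch is atomic with respect to the tracer. For example | 
 |  |  * (assuming the usual debugfs mount): | 
 |  |  * | 
 |  |  *	echo 3 > /sys/kernel/debug/tracing/tracing_cpumask | 
 |  |  * | 
 |  |  * restricts tracing to CPUs 0 and 1. | 
 |  |  */ | 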
 | 2561 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2562 | static const struct file_operations tracing_cpumask_fops = { | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2563 | 	.open		= tracing_open_generic, | 
 | 2564 | 	.read		= tracing_cpumask_read, | 
 | 2565 | 	.write		= tracing_cpumask_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 2566 | 	.llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2567 | }; | 
 | 2568 |  | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2569 | static int tracing_trace_options_show(struct seq_file *m, void *v) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2570 | { | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2571 | 	struct tracer_opt *trace_opts; | 
 | 2572 | 	u32 tracer_flags; | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2573 | 	int i; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2574 |  | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2575 | 	mutex_lock(&trace_types_lock); | 
 | 2576 | 	tracer_flags = current_trace->flags->val; | 
 | 2577 | 	trace_opts = current_trace->flags->opts; | 
 | 2578 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2579 | 	for (i = 0; trace_options[i]; i++) { | 
 | 2580 | 		if (trace_flags & (1 << i)) | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2581 | 			seq_printf(m, "%s\n", trace_options[i]); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2582 | 		else | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2583 | 			seq_printf(m, "no%s\n", trace_options[i]); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2584 | 	} | 
 | 2585 |  | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2586 | 	for (i = 0; trace_opts[i].name; i++) { | 
 | 2587 | 		if (tracer_flags & trace_opts[i].bit) | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2588 | 			seq_printf(m, "%s\n", trace_opts[i].name); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2589 | 		else | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2590 | 			seq_printf(m, "no%s\n", trace_opts[i].name); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2591 | 	} | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2592 | 	mutex_unlock(&trace_types_lock); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2593 |  | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2594 | 	return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2595 | } | 
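 |  |  | 
 |  | /* | 
 |  |  * Reading "trace_options" lists one flag per line, prefixed with "no" | 
 |  |  * when the flag is clear, e.g.: | 
 |  |  * | 
 |  |  *	print-parent | 
 |  |  *	nosym-offset | 
 |  |  *	... | 
 |  |  */ | 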
 | 2596 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2597 | static int __set_tracer_option(struct tracer *trace, | 
 | 2598 | 			       struct tracer_flags *tracer_flags, | 
 | 2599 | 			       struct tracer_opt *opts, int neg) | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2600 | { | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2601 | 	int ret; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2602 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2603 | 	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2604 | 	if (ret) | 
 | 2605 | 		return ret; | 
 | 2606 |  | 
 | 2607 | 	if (neg) | 
| Zhaolei | 7770841 | 2009-08-07 18:53:21 +0800 | [diff] [blame] | 2608 | 		tracer_flags->val &= ~opts->bit; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2609 | 	else | 
| Zhaolei | 7770841 | 2009-08-07 18:53:21 +0800 | [diff] [blame] | 2610 | 		tracer_flags->val |= opts->bit; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2611 | 	return 0; | 
 | 2612 | } | 
 | 2613 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2614 | /* Try to assign a tracer-specific option */ | 
 | 2615 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 
 | 2616 | { | 
 | 2617 | 	struct tracer_flags *tracer_flags = trace->flags; | 
 | 2618 | 	struct tracer_opt *opts = NULL; | 
 | 2619 | 	int i; | 
 | 2620 |  | 
 | 2621 | 	for (i = 0; tracer_flags->opts[i].name; i++) { | 
 | 2622 | 		opts = &tracer_flags->opts[i]; | 
 | 2623 |  | 
 | 2624 | 		if (strcmp(cmp, opts->name) == 0) | 
 | 2625 | 			return __set_tracer_option(trace, trace->flags, | 
 | 2626 | 						   opts, neg); | 
 | 2627 | 	} | 
 | 2628 |  | 
 | 2629 | 	return -EINVAL; | 
 | 2630 | } | 
 | 2631 |  | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2632 | static void set_tracer_flags(unsigned int mask, int enabled) | 
 | 2633 | { | 
 | 2634 | 	/* do nothing if flag is already set */ | 
 | 2635 | 	if (!!(trace_flags & mask) == !!enabled) | 
 | 2636 | 		return; | 
 | 2637 |  | 
 | 2638 | 	if (enabled) | 
 | 2639 | 		trace_flags |= mask; | 
 | 2640 | 	else | 
 | 2641 | 		trace_flags &= ~mask; | 
| Li Zefan | e870e9a | 2010-07-02 11:07:32 +0800 | [diff] [blame] | 2642 |  | 
 | 2643 | 	if (mask == TRACE_ITER_RECORD_CMD) | 
 | 2644 | 		trace_event_enable_cmd_record(enabled); | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 2645 |  | 
 | 2646 | 	if (mask == TRACE_ITER_OVERWRITE) | 
 | 2647 | 		ring_buffer_change_overwrite(global_trace.buffer, enabled); | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2648 | } | 
 | 2649 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2650 | static ssize_t | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 2651 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2652 | 			size_t cnt, loff_t *ppos) | 
 | 2653 | { | 
 | 2654 | 	char buf[64]; | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2655 | 	char *cmp; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2656 | 	int neg = 0; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2657 | 	int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2658 | 	int i; | 
 | 2659 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2660 | 	if (cnt >= sizeof(buf)) | 
 | 2661 | 		return -EINVAL; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2662 |  | 
 | 2663 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 2664 | 		return -EFAULT; | 
 | 2665 |  | 
 | 2666 | 	buf[cnt] = 0; | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2667 | 	cmp = strstrip(buf); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2668 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2669 | 	if (strncmp(cmp, "no", 2) == 0) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2670 | 		neg = 1; | 
 | 2671 | 		cmp += 2; | 
 | 2672 | 	} | 
 | 2673 |  | 
 | 2674 | 	for (i = 0; trace_options[i]; i++) { | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 2675 | 		if (strcmp(cmp, trace_options[i]) == 0) { | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2676 | 			set_tracer_flags(1 << i, !neg); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2677 | 			break; | 
 | 2678 | 		} | 
 | 2679 | 	} | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2680 |  | 
 | 2681 | 	/* If no option could be set, test the specific tracer options */ | 
 | 2682 | 	if (!trace_options[i]) { | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2683 | 		mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2684 | 		ret = set_tracer_option(current_trace, cmp, neg); | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2685 | 		mutex_unlock(&trace_types_lock); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2686 | 		if (ret) | 
 | 2687 | 			return ret; | 
 | 2688 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2689 |  | 
| Jiri Olsa | cf8517c | 2009-10-23 19:36:16 -0400 | [diff] [blame] | 2690 | 	*ppos += cnt; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2691 |  | 
 | 2692 | 	return cnt; | 
 | 2693 | } | 
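/*
 * Illustrative use of the write handler above (paths assume debugfs is
 * mounted at /sys/kernel/debug, as in the readme further down); a "no"
 * prefix clears the named option:
 *
 *   # echo print-parent > /sys/kernel/debug/tracing/trace_options
 *   # echo nosym-offset > /sys/kernel/debug/tracing/trace_options
 */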
 | 2694 |  | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2695 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | 
 | 2696 | { | 
 | 2697 | 	if (tracing_disabled) | 
 | 2698 | 		return -ENODEV; | 
 | 2699 | 	return single_open(file, tracing_trace_options_show, NULL); | 
 | 2700 | } | 
 | 2701 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2702 | static const struct file_operations tracing_iter_fops = { | 
| Li Zefan | fdb372e | 2009-12-08 11:15:59 +0800 | [diff] [blame] | 2703 | 	.open		= tracing_trace_options_open, | 
 | 2704 | 	.read		= seq_read, | 
 | 2705 | 	.llseek		= seq_lseek, | 
 | 2706 | 	.release	= single_release, | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 2707 | 	.write		= tracing_trace_options_write, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2708 | }; | 
 | 2709 |  | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2710 | static const char readme_msg[] = | 
 | 2711 | 	"tracing mini-HOWTO:\n\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 2712 | 	"# mount -t debugfs nodev /sys/kernel/debug\n\n" | 
 | 2713 | 	"# cat /sys/kernel/debug/tracing/available_tracers\n" | 
| Nikanth Karthikesan | bc2b687 | 2009-03-23 11:58:31 +0530 | [diff] [blame] | 2714 | 	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 2715 | 	"# cat /sys/kernel/debug/tracing/current_tracer\n" | 
| Nikanth Karthikesan | bc2b687 | 2009-03-23 11:58:31 +0530 | [diff] [blame] | 2716 | 	"nop\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 2717 | 	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n" | 
 | 2718 | 	"# cat /sys/kernel/debug/tracing/current_tracer\n" | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2719 | 	"sched_switch\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 2720 | 	"# cat /sys/kernel/debug/tracing/trace_options\n" | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2721 | 	"noprint-parent nosym-offset nosym-addr noverbose\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 2722 | 	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" | 
| Geunsik Lim | 9b5f8b3 | 2011-08-12 14:30:22 +0900 | [diff] [blame] | 2723 | 	"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n" | 
| GeunSik Lim | 156f5a7 | 2009-06-02 15:01:37 +0900 | [diff] [blame] | 2724 | 	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n" | 
| Geunsik Lim | 9b5f8b3 | 2011-08-12 14:30:22 +0900 | [diff] [blame] | 2725 | 	"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n" | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2726 | ; | 
 | 2727 |  | 
 | 2728 | static ssize_t | 
 | 2729 | tracing_readme_read(struct file *filp, char __user *ubuf, | 
 | 2730 | 		       size_t cnt, loff_t *ppos) | 
 | 2731 | { | 
 | 2732 | 	return simple_read_from_buffer(ubuf, cnt, ppos, | 
 | 2733 | 					readme_msg, strlen(readme_msg)); | 
 | 2734 | } | 
 | 2735 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 2736 | static const struct file_operations tracing_readme_fops = { | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2737 | 	.open		= tracing_open_generic, | 
 | 2738 | 	.read		= tracing_readme_read, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 2739 | 	.llseek		= generic_file_llseek, | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2740 | }; | 
 | 2741 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2742 | static ssize_t | 
| Avadh Patel | 69abe6a | 2009-04-10 16:04:48 -0400 | [diff] [blame] | 2743 | tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, | 
 | 2744 | 				size_t cnt, loff_t *ppos) | 
 | 2745 | { | 
 | 2746 | 	char *buf_comm; | 
 | 2747 | 	char *file_buf; | 
 | 2748 | 	char *buf; | 
 | 2749 | 	int len = 0; | 
 | 2750 | 	int pid; | 
 | 2751 | 	int i; | 
 | 2752 |  | 
 | 2753 | 	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); | 
 | 2754 | 	if (!file_buf) | 
 | 2755 | 		return -ENOMEM; | 
 | 2756 |  | 
 | 2757 | 	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); | 
 | 2758 | 	if (!buf_comm) { | 
 | 2759 | 		kfree(file_buf); | 
 | 2760 | 		return -ENOMEM; | 
 | 2761 | 	} | 
 | 2762 |  | 
 | 2763 | 	buf = file_buf; | 
 | 2764 |  | 
 | 2765 | 	for (i = 0; i < SAVED_CMDLINES; i++) { | 
 | 2766 | 		int r; | 
 | 2767 |  | 
 | 2768 | 		pid = map_cmdline_to_pid[i]; | 
 | 2769 | 		if (pid == -1 || pid == NO_CMDLINE_MAP) | 
 | 2770 | 			continue; | 
 | 2771 |  | 
 | 2772 | 		trace_find_cmdline(pid, buf_comm); | 
 | 2773 | 		r = sprintf(buf, "%d %s\n", pid, buf_comm); | 
 | 2774 | 		buf += r; | 
 | 2775 | 		len += r; | 
 | 2776 | 	} | 
 | 2777 |  | 
 | 2778 | 	len = simple_read_from_buffer(ubuf, cnt, ppos, | 
 | 2779 | 				      file_buf, len); | 
 | 2780 |  | 
 | 2781 | 	kfree(file_buf); | 
 | 2782 | 	kfree(buf_comm); | 
 | 2783 |  | 
 | 2784 | 	return len; | 
 | 2785 | } | 
 | 2786 |  | 
 | 2787 | static const struct file_operations tracing_saved_cmdlines_fops = { | 
 | 2788 | 	.open		= tracing_open_generic, | 
 | 2789 | 	.read		= tracing_saved_cmdlines_read, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 2790 | 	.llseek		= generic_file_llseek, | 
| Avadh Patel | 69abe6a | 2009-04-10 16:04:48 -0400 | [diff] [blame] | 2791 | }; | 
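/*
 * Reading saved_cmdlines yields one "<pid> <comm>" pair per cached
 * entry, per the sprintf() format above. Illustrative output only;
 * actual pids and comms depend on the running system:
 *
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines
 *   1 init
 *   1234 bash
 */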
 | 2792 |  | 
 | 2793 | static ssize_t | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2794 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | 
 | 2795 | 		  size_t cnt, loff_t *ppos) | 
 | 2796 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2797 | 	char buf[64]; | 
 | 2798 | 	int r; | 
 | 2799 |  | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2800 | 	r = sprintf(buf, "%u\n", tracer_enabled); | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2801 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2802 | } | 
 | 2803 |  | 
 | 2804 | static ssize_t | 
 | 2805 | tracing_ctrl_write(struct file *filp, const char __user *ubuf, | 
 | 2806 | 		   size_t cnt, loff_t *ppos) | 
 | 2807 | { | 
 | 2808 | 	struct trace_array *tr = filp->private_data; | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 2809 | 	unsigned long val; | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2810 | 	int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2811 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 2812 | 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
 | 2813 | 	if (ret) | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2814 | 		return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2815 |  | 
 | 2816 | 	val = !!val; | 
 | 2817 |  | 
 | 2818 | 	mutex_lock(&trace_types_lock); | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2819 | 	if (tracer_enabled ^ val) { | 
| Steven Rostedt | 6752ab4 | 2011-02-08 13:54:06 -0500 | [diff] [blame] | 2820 |  | 
 | 2821 | 		/* Only need to warn if this is used to change the state */ | 
 | 2822 | 		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on"); | 
 | 2823 |  | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2824 | 		if (val) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2825 | 			tracer_enabled = 1; | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2826 | 			if (current_trace->start) | 
 | 2827 | 				current_trace->start(tr); | 
 | 2828 | 			tracing_start(); | 
 | 2829 | 		} else { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2830 | 			tracer_enabled = 0; | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2831 | 			tracing_stop(); | 
 | 2832 | 			if (current_trace->stop) | 
 | 2833 | 				current_trace->stop(tr); | 
 | 2834 | 		} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2835 | 	} | 
 | 2836 | 	mutex_unlock(&trace_types_lock); | 
 | 2837 |  | 
| Jiri Olsa | cf8517c | 2009-10-23 19:36:16 -0400 | [diff] [blame] | 2838 | 	*ppos += cnt; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2839 |  | 
 | 2840 | 	return cnt; | 
 | 2841 | } | 
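/*
 * As the WARN_ONCE() above notes, the tracing_enabled interface backed
 * by this handler is deprecated; the preferred control is tracing_on:
 *
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_on
 *   # echo 1 > /sys/kernel/debug/tracing/tracing_on
 */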
 | 2842 |  | 
 | 2843 | static ssize_t | 
 | 2844 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 
 | 2845 | 		       size_t cnt, loff_t *ppos) | 
 | 2846 | { | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 2847 | 	char buf[MAX_TRACER_SIZE+2]; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2848 | 	int r; | 
 | 2849 |  | 
 | 2850 | 	mutex_lock(&trace_types_lock); | 
 | 2851 | 	if (current_trace) | 
 | 2852 | 		r = sprintf(buf, "%s\n", current_trace->name); | 
 | 2853 | 	else | 
 | 2854 | 		r = sprintf(buf, "\n"); | 
 | 2855 | 	mutex_unlock(&trace_types_lock); | 
 | 2856 |  | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2857 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2858 | } | 
 | 2859 |  | 
| Arnaldo Carvalho de Melo | b6f11df | 2009-02-05 18:02:00 -0200 | [diff] [blame] | 2860 | int tracer_init(struct tracer *t, struct trace_array *tr) | 
 | 2861 | { | 
 | 2862 | 	tracing_reset_online_cpus(tr); | 
 | 2863 | 	return t->init(tr); | 
 | 2864 | } | 
 | 2865 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 2866 | static int __tracing_resize_ring_buffer(unsigned long size) | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2867 | { | 
 | 2868 | 	int ret; | 
 | 2869 |  | 
 | 2870 | 	/* | 
 | 2871 | 	 * If the kernel or the user changes the size of the ring buffer, | 
| Steven Rostedt | a123c52 | 2009-03-12 11:21:08 -0400 | [diff] [blame] | 2872 | 	 * we use the size that was given, and we can forget about | 
 | 2873 | 	 * expanding it later. | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2874 | 	 */ | 
 | 2875 | 	ring_buffer_expanded = 1; | 
 | 2876 |  | 
 | 2877 | 	ret = ring_buffer_resize(global_trace.buffer, size); | 
 | 2878 | 	if (ret < 0) | 
 | 2879 | 		return ret; | 
 | 2880 |  | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 2881 | 	if (!current_trace->use_max_tr) | 
 | 2882 | 		goto out; | 
 | 2883 |  | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2884 | 	ret = ring_buffer_resize(max_tr.buffer, size); | 
 | 2885 | 	if (ret < 0) { | 
 | 2886 | 		int r; | 
 | 2887 |  | 
 | 2888 | 		r = ring_buffer_resize(global_trace.buffer, | 
 | 2889 | 				       global_trace.entries); | 
 | 2890 | 		if (r < 0) { | 
| Steven Rostedt | a123c52 | 2009-03-12 11:21:08 -0400 | [diff] [blame] | 2891 | 			/* | 
 | 2892 | 			 * AARGH! We are left with a different | 
 | 2893 | 			 * size max buffer!!!! | 
 | 2894 | 			 * The max buffer is our "snapshot" buffer. | 
 | 2895 | 			 * When a tracer needs a snapshot (one of the | 
 | 2896 | 			 * latency tracers), it swaps the max buffer | 
 | 2897 | 			 * with the saved snapshot. We succeeded in | 
 | 2898 | 			 * updating the size of the main buffer, but failed | 
 | 2899 | 			 * to update the size of the max buffer. But when we tried | 
 | 2900 | 			 * to reset the main buffer to the original size, we | 
 | 2901 | 			 * failed there too. This is very unlikely to | 
 | 2902 | 			 * happen, but if it does, warn and kill all | 
 | 2903 | 			 * tracing. | 
 | 2904 | 			 */ | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2905 | 			WARN_ON(1); | 
 | 2906 | 			tracing_disabled = 1; | 
 | 2907 | 		} | 
 | 2908 | 		return ret; | 
 | 2909 | 	} | 
 | 2910 |  | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 2911 | 	max_tr.entries = size; | 
 | 2912 |  out: | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2913 | 	global_trace.entries = size; | 
 | 2914 |  | 
 | 2915 | 	return ret; | 
 | 2916 | } | 
 | 2917 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 2918 | static ssize_t tracing_resize_ring_buffer(unsigned long size) | 
 | 2919 | { | 
 | 2920 | 	int cpu, ret = size; | 
 | 2921 |  | 
 | 2922 | 	mutex_lock(&trace_types_lock); | 
 | 2923 |  | 
 | 2924 | 	tracing_stop(); | 
 | 2925 |  | 
 | 2926 | 	/* disable all cpu buffers */ | 
 | 2927 | 	for_each_tracing_cpu(cpu) { | 
 | 2928 | 		if (global_trace.data[cpu]) | 
 | 2929 | 			atomic_inc(&global_trace.data[cpu]->disabled); | 
 | 2930 | 		if (max_tr.data[cpu]) | 
 | 2931 | 			atomic_inc(&max_tr.data[cpu]->disabled); | 
 | 2932 | 	} | 
 | 2933 |  | 
 | 2934 | 	if (size != global_trace.entries) | 
 | 2935 | 		ret = __tracing_resize_ring_buffer(size); | 
 | 2936 |  | 
 | 2937 | 	if (ret < 0) | 
 | 2938 | 		ret = -ENOMEM; | 
 | 2939 |  | 
 | 2940 | 	for_each_tracing_cpu(cpu) { | 
 | 2941 | 		if (global_trace.data[cpu]) | 
 | 2942 | 			atomic_dec(&global_trace.data[cpu]->disabled); | 
 | 2943 | 		if (max_tr.data[cpu]) | 
 | 2944 | 			atomic_dec(&max_tr.data[cpu]->disabled); | 
 | 2945 | 	} | 
 | 2946 |  | 
 | 2947 | 	tracing_start(); | 
 | 2948 | 	mutex_unlock(&trace_types_lock); | 
 | 2949 |  | 
 | 2950 | 	return ret; | 
 | 2951 | } | 
 | 2952 |  | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 2953 |  | 
| Steven Rostedt | 1852fcc | 2009-03-11 14:33:00 -0400 | [diff] [blame] | 2954 | /** | 
 | 2955 |  * tracing_update_buffers - used by tracing facility to expand ring buffers | 
 | 2956 |  * | 
 | 2957 |  * To save memory when tracing is never used on a system that has it | 
 | 2958 |  * configured in, the ring buffers are set to a minimum size. But once | 
 | 2959 |  * a user starts to use the tracing facility, they need to grow | 
 | 2960 |  * to their default size. | 
 | 2961 |  * | 
 | 2962 |  * This function is to be called when a tracer is about to be used. | 
 | 2963 |  */ | 
 | 2964 | int tracing_update_buffers(void) | 
 | 2965 | { | 
 | 2966 | 	int ret = 0; | 
 | 2967 |  | 
| Steven Rostedt | 1027fcb | 2009-03-12 11:33:20 -0400 | [diff] [blame] | 2968 | 	mutex_lock(&trace_types_lock); | 
| Steven Rostedt | 1852fcc | 2009-03-11 14:33:00 -0400 | [diff] [blame] | 2969 | 	if (!ring_buffer_expanded) | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 2970 | 		ret = __tracing_resize_ring_buffer(trace_buf_size); | 
| Steven Rostedt | 1027fcb | 2009-03-12 11:33:20 -0400 | [diff] [blame] | 2971 | 	mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | 1852fcc | 2009-03-11 14:33:00 -0400 | [diff] [blame] | 2972 |  | 
 | 2973 | 	return ret; | 
 | 2974 | } | 
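/*
 * A hypothetical caller-side sketch of the expected pattern: code that
 * is about to enable a tracer or trace events calls this first, and a
 * negative return means the ring buffers could not be expanded:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */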
 | 2975 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 2976 | struct trace_option_dentry; | 
 | 2977 |  | 
 | 2978 | static struct trace_option_dentry * | 
 | 2979 | create_trace_option_files(struct tracer *tracer); | 
 | 2980 |  | 
 | 2981 | static void | 
 | 2982 | destroy_trace_option_files(struct trace_option_dentry *topts); | 
 | 2983 |  | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 2984 | static int tracing_set_tracer(const char *buf) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2985 | { | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 2986 | 	static struct trace_option_dentry *topts; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2987 | 	struct trace_array *tr = &global_trace; | 
 | 2988 | 	struct tracer *t; | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 2989 | 	int ret = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2990 |  | 
| Steven Rostedt | 1027fcb | 2009-03-12 11:33:20 -0400 | [diff] [blame] | 2991 | 	mutex_lock(&trace_types_lock); | 
 | 2992 |  | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2993 | 	if (!ring_buffer_expanded) { | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 2994 | 		ret = __tracing_resize_ring_buffer(trace_buf_size); | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2995 | 		if (ret < 0) | 
| Frederic Weisbecker | 59f586d | 2009-03-15 22:10:39 +0100 | [diff] [blame] | 2996 | 			goto out; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2997 | 		ret = 0; | 
 | 2998 | 	} | 
 | 2999 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3000 | 	for (t = trace_types; t; t = t->next) { | 
 | 3001 | 		if (strcmp(t->name, buf) == 0) | 
 | 3002 | 			break; | 
 | 3003 | 	} | 
| Frederic Weisbecker | c2931e0 | 2008-10-04 22:04:44 +0200 | [diff] [blame] | 3004 | 	if (!t) { | 
 | 3005 | 		ret = -EINVAL; | 
 | 3006 | 		goto out; | 
 | 3007 | 	} | 
 | 3008 | 	if (t == current_trace) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3009 | 		goto out; | 
 | 3010 |  | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 3011 | 	trace_branch_disable(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3012 | 	if (current_trace && current_trace->reset) | 
 | 3013 | 		current_trace->reset(tr); | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3014 | 	if (current_trace && current_trace->use_max_tr) { | 
 | 3015 | 		/* | 
 | 3016 | 		 * We don't free the ring buffer; instead, we resize it because | 
 | 3017 | 		 * the max_tr ring buffer has some state (e.g. ring->clock) and | 
 | 3018 | 		 * we want to preserve it. | 
 | 3019 | 		 */ | 
 | 3020 | 		ring_buffer_resize(max_tr.buffer, 1); | 
 | 3021 | 		max_tr.entries = 1; | 
 | 3022 | 	} | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3023 | 	destroy_trace_option_files(topts); | 
 | 3024 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3025 | 	current_trace = t; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3026 |  | 
 | 3027 | 	topts = create_trace_option_files(current_trace); | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 3028 | 	if (current_trace->use_max_tr) { | 
 | 3029 | 		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); | 
 | 3030 | 		if (ret < 0) | 
 | 3031 | 			goto out; | 
 | 3032 | 		max_tr.entries = global_trace.entries; | 
 | 3033 | 	} | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3034 |  | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 3035 | 	if (t->init) { | 
| Arnaldo Carvalho de Melo | b6f11df | 2009-02-05 18:02:00 -0200 | [diff] [blame] | 3036 | 		ret = tracer_init(t, tr); | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 3037 | 		if (ret) | 
 | 3038 | 			goto out; | 
 | 3039 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3040 |  | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 3041 | 	trace_branch_enable(tr); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3042 |  out: | 
 | 3043 | 	mutex_unlock(&trace_types_lock); | 
 | 3044 |  | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 3045 | 	return ret; | 
 | 3046 | } | 
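/*
 * The usual way to reach tracing_set_tracer() is through the
 * current_tracer file, as in the readme above:
 *
 *   # echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 */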
 | 3047 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3048 | static ssize_t | 
 | 3049 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 
 | 3050 | 			size_t cnt, loff_t *ppos) | 
 | 3051 | { | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 3052 | 	char buf[MAX_TRACER_SIZE+1]; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3053 | 	int i; | 
 | 3054 | 	size_t ret; | 
| Frederic Weisbecker | e6e7a65 | 2008-11-16 05:53:19 +0100 | [diff] [blame] | 3055 | 	int err; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3056 |  | 
| Steven Rostedt | 60063a6 | 2008-10-28 10:44:24 -0400 | [diff] [blame] | 3057 | 	ret = cnt; | 
 | 3058 |  | 
| Li Zefan | ee6c2c1 | 2009-09-18 14:06:47 +0800 | [diff] [blame] | 3059 | 	if (cnt > MAX_TRACER_SIZE) | 
 | 3060 | 		cnt = MAX_TRACER_SIZE; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3061 |  | 
 | 3062 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 3063 | 		return -EFAULT; | 
 | 3064 |  | 
 | 3065 | 	buf[cnt] = 0; | 
 | 3066 |  | 
 | 3067 | 	/* strip ending whitespace. */ | 
 | 3068 | 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 
 | 3069 | 		buf[i] = 0; | 
 | 3070 |  | 
| Frederic Weisbecker | e6e7a65 | 2008-11-16 05:53:19 +0100 | [diff] [blame] | 3071 | 	err = tracing_set_tracer(buf); | 
 | 3072 | 	if (err) | 
 | 3073 | 		return err; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3074 |  | 
| Jiri Olsa | cf8517c | 2009-10-23 19:36:16 -0400 | [diff] [blame] | 3075 | 	*ppos += ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3076 |  | 
| Frederic Weisbecker | c2931e0 | 2008-10-04 22:04:44 +0200 | [diff] [blame] | 3077 | 	return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3078 | } | 
 | 3079 |  | 
 | 3080 | static ssize_t | 
 | 3081 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | 
 | 3082 | 		     size_t cnt, loff_t *ppos) | 
 | 3083 | { | 
 | 3084 | 	unsigned long *ptr = filp->private_data; | 
 | 3085 | 	char buf[64]; | 
 | 3086 | 	int r; | 
 | 3087 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3088 | 	r = snprintf(buf, sizeof(buf), "%ld\n", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3089 | 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3090 | 	if (r > sizeof(buf)) | 
 | 3091 | 		r = sizeof(buf); | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3092 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3093 | } | 
 | 3094 |  | 
 | 3095 | static ssize_t | 
 | 3096 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 
 | 3097 | 		      size_t cnt, loff_t *ppos) | 
 | 3098 | { | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 3099 | 	unsigned long *ptr = filp->private_data; | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 3100 | 	unsigned long val; | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3101 | 	int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3102 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 3103 | 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
 | 3104 | 	if (ret) | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3105 | 		return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3106 |  | 
 | 3107 | 	*ptr = val * 1000; | 
 | 3108 |  | 
 | 3109 | 	return cnt; | 
 | 3110 | } | 
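/*
 * The latency files served by these handlers are read and written in
 * microseconds; the value is kept internally in nanoseconds (hence the
 * "val * 1000" above and nsecs_to_usecs() in the read path). For
 * illustration, assuming the usual tracing_max_latency debugfs entry:
 *
 *   # echo 100 > /sys/kernel/debug/tracing/tracing_max_latency
 */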
 | 3111 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3112 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 
 | 3113 | { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3114 | 	long cpu_file = (long) inode->i_private; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3115 | 	struct trace_iterator *iter; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3116 | 	int ret = 0; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3117 |  | 
 | 3118 | 	if (tracing_disabled) | 
 | 3119 | 		return -ENODEV; | 
 | 3120 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3121 | 	mutex_lock(&trace_types_lock); | 
 | 3122 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3123 | 	/* create a buffer to store the information to pass to userspace */ | 
 | 3124 | 	iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3125 | 	if (!iter) { | 
 | 3126 | 		ret = -ENOMEM; | 
 | 3127 | 		goto out; | 
 | 3128 | 	} | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3129 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3130 | 	/* | 
 | 3131 | 	 * We make a copy of the current tracer to avoid concurrent | 
 | 3132 | 	 * changes to it while we are reading. | 
 | 3133 | 	 */ | 
 | 3134 | 	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); | 
 | 3135 | 	if (!iter->trace) { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3136 | 		ret = -ENOMEM; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3137 | 		goto fail; | 
 | 3138 | 	} | 
 | 3139 | 	if (current_trace) | 
 | 3140 | 		*iter->trace = *current_trace; | 
 | 3141 |  | 
 | 3142 | 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 
 | 3143 | 		ret = -ENOMEM; | 
 | 3144 | 		goto fail; | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 3145 | 	} | 
 | 3146 |  | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3147 | 	/* trace pipe does not show start of buffer */ | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 3148 | 	cpumask_setall(iter->started); | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3149 |  | 
| Steven Rostedt | 112f38a7 | 2009-06-01 15:16:05 -0400 | [diff] [blame] | 3150 | 	if (trace_flags & TRACE_ITER_LATENCY_FMT) | 
 | 3151 | 		iter->iter_flags |= TRACE_FILE_LAT_FMT; | 
 | 3152 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3153 | 	iter->cpu_file = cpu_file; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3154 | 	iter->tr = &global_trace; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3155 | 	mutex_init(&iter->mutex); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3156 | 	filp->private_data = iter; | 
 | 3157 |  | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3158 | 	if (iter->trace->pipe_open) | 
 | 3159 | 		iter->trace->pipe_open(iter); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3160 |  | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3161 | 	nonseekable_open(inode, filp); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3162 | out: | 
 | 3163 | 	mutex_unlock(&trace_types_lock); | 
 | 3164 | 	return ret; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3165 |  | 
 | 3166 | fail: | 
 | 3167 | 	kfree(iter->trace); | 
 | 3168 | 	kfree(iter); | 
 | 3169 | 	mutex_unlock(&trace_types_lock); | 
 | 3170 | 	return ret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3171 | } | 
 | 3172 |  | 
 | 3173 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 
 | 3174 | { | 
 | 3175 | 	struct trace_iterator *iter = file->private_data; | 
 | 3176 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3177 | 	mutex_lock(&trace_types_lock); | 
 | 3178 |  | 
| Steven Rostedt | 29bf4a5 | 2009-12-09 12:37:43 -0500 | [diff] [blame] | 3179 | 	if (iter->trace->pipe_close) | 
| Steven Rostedt | c521efd | 2009-12-07 09:06:24 -0500 | [diff] [blame] | 3180 | 		iter->trace->pipe_close(iter); | 
 | 3181 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3182 | 	mutex_unlock(&trace_types_lock); | 
 | 3183 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 3184 | 	free_cpumask_var(iter->started); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3185 | 	mutex_destroy(&iter->mutex); | 
 | 3186 | 	kfree(iter->trace); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3187 | 	kfree(iter); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3188 |  | 
 | 3189 | 	return 0; | 
 | 3190 | } | 
 | 3191 |  | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 3192 | static unsigned int | 
 | 3193 | tracing_poll_pipe(struct file *filp, poll_table *poll_table) | 
 | 3194 | { | 
 | 3195 | 	struct trace_iterator *iter = filp->private_data; | 
 | 3196 |  | 
 | 3197 | 	if (trace_flags & TRACE_ITER_BLOCK) { | 
 | 3198 | 		/* | 
 | 3199 | 		 * Always select as readable when in blocking mode | 
 | 3200 | 		 */ | 
 | 3201 | 		return POLLIN | POLLRDNORM; | 
| Ingo Molnar | afc2abc | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3202 | 	} else { | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 3203 | 		if (!trace_empty(iter)) | 
 | 3204 | 			return POLLIN | POLLRDNORM; | 
 | 3205 | 		poll_wait(filp, &trace_wait, poll_table); | 
 | 3206 | 		if (!trace_empty(iter)) | 
 | 3207 | 			return POLLIN | POLLRDNORM; | 
 | 3208 |  | 
 | 3209 | 		return 0; | 
 | 3210 | 	} | 
 | 3211 | } | 
 | 3212 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 3213 |  | 
 | 3214 | void default_wait_pipe(struct trace_iterator *iter) | 
 | 3215 | { | 
 | 3216 | 	DEFINE_WAIT(wait); | 
 | 3217 |  | 
 | 3218 | 	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); | 
 | 3219 |  | 
 | 3220 | 	if (trace_empty(iter)) | 
 | 3221 | 		schedule(); | 
 | 3222 |  | 
 | 3223 | 	finish_wait(&trace_wait, &wait); | 
 | 3224 | } | 
 | 3225 |  | 
 | 3226 | /* | 
 | 3227 |  * This is a makeshift waitqueue. | 
 | 3228 |  * A tracer might use this callback in some rare cases: | 
 | 3229 |  * | 
 | 3230 |  *  1) the current tracer might hold the runqueue lock when it wakes up | 
 | 3231 |  *     a reader, hence a deadlock (sched, function, and function graph tracers) | 
 | 3232 |  *  2) the function tracers trace all functions, and we don't want | 
 | 3233 |  *     the overhead of calling wake_up and friends | 
 | 3234 |  *     (and tracing them too) | 
 | 3235 |  * | 
 | 3236 |  *     Anyway, this is a really primitive wakeup. | 
 | 3237 |  */ | 
 | 3238 | void poll_wait_pipe(struct trace_iterator *iter) | 
 | 3239 | { | 
 | 3240 | 	set_current_state(TASK_INTERRUPTIBLE); | 
 | 3241 | 	/* sleep for 100 msecs, and try again. */ | 
 | 3242 | 	schedule_timeout(HZ / 10); | 
 | 3243 | } | 
 | 3244 |  | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3245 | /* Must be called with trace_types_lock mutex held. */ | 
 | 3246 | static int tracing_wait_pipe(struct file *filp) | 
 | 3247 | { | 
 | 3248 | 	struct trace_iterator *iter = filp->private_data; | 
 | 3249 |  | 
 | 3250 | 	while (trace_empty(iter)) { | 
 | 3251 |  | 
 | 3252 | 		if ((filp->f_flags & O_NONBLOCK)) { | 
 | 3253 | 			return -EAGAIN; | 
 | 3254 | 		} | 
 | 3255 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3256 | 		mutex_unlock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3257 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 3258 | 		iter->trace->wait_pipe(iter); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3259 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3260 | 		mutex_lock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3261 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 3262 | 		if (signal_pending(current)) | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3263 | 			return -EINTR; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3264 |  | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3265 | 		/* | 
 | 3266 | 		 * We keep blocking until we read something, or until tracing | 
 | 3267 | 		 * gets disabled after we have read something. We still block | 
 | 3268 | 		 * while tracing is disabled if nothing has been read yet; this | 
 | 3269 | 		 * lets a user cat this file, then enable tracing. But after we | 
 | 3270 | 		 * have read something, we give an EOF when tracing is disabled. | 
 | 3271 | 		 * | 
 | 3272 | 		 * iter->pos will be 0 if we haven't read anything. | 
 | 3273 | 		 */ | 
 | 3274 | 		if (!tracer_enabled && iter->pos) | 
 | 3275 | 			break; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3276 | 	} | 
 | 3277 |  | 
 | 3278 | 	return 1; | 
 | 3279 | } | 
 | 3280 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3281 | /* | 
 | 3282 |  * Consumer reader. | 
 | 3283 |  */ | 
 | 3284 | static ssize_t | 
 | 3285 | tracing_read_pipe(struct file *filp, char __user *ubuf, | 
 | 3286 | 		  size_t cnt, loff_t *ppos) | 
 | 3287 | { | 
 | 3288 | 	struct trace_iterator *iter = filp->private_data; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3289 | 	static struct tracer *old_tracer; | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3290 | 	ssize_t sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3291 |  | 
 | 3292 | 	/* return any leftover data */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3293 | 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 
 | 3294 | 	if (sret != -EBUSY) | 
 | 3295 | 		return sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3296 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 3297 | 	trace_seq_init(&iter->seq); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3298 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3299 | 	/* copy the tracer to avoid using a global lock all around */ | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3300 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3301 | 	if (unlikely(old_tracer != current_trace && current_trace)) { | 
 | 3302 | 		old_tracer = current_trace; | 
 | 3303 | 		*iter->trace = *current_trace; | 
 | 3304 | 	} | 
 | 3305 | 	mutex_unlock(&trace_types_lock); | 
 | 3306 |  | 
 | 3307 | 	/* | 
 | 3308 | 	 * Avoid more than one consumer on a single file descriptor. | 
 | 3309 | 	 * This is just a matter of trace coherency; the ring buffer itself | 
 | 3310 | 	 * is protected. | 
 | 3311 | 	 */ | 
 | 3312 | 	mutex_lock(&iter->mutex); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3313 | 	if (iter->trace->read) { | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3314 | 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | 
 | 3315 | 		if (sret) | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3316 | 			goto out; | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3317 | 	} | 
 | 3318 |  | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3319 | waitagain: | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3320 | 	sret = tracing_wait_pipe(filp); | 
 | 3321 | 	if (sret <= 0) | 
 | 3322 | 		goto out; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3323 |  | 
 | 3324 | 	/* stop when tracing is finished */ | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3325 | 	if (trace_empty(iter)) { | 
 | 3326 | 		sret = 0; | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3327 | 		goto out; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 3328 | 	} | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3329 |  | 
 | 3330 | 	if (cnt >= PAGE_SIZE) | 
 | 3331 | 		cnt = PAGE_SIZE - 1; | 
 | 3332 |  | 
| Steven Rostedt | 53d0aa7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3333 | 	/* reset all but tr, trace, and overruns */ | 
| Steven Rostedt | 53d0aa7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3334 | 	memset(&iter->seq, 0, | 
 | 3335 | 	       sizeof(struct trace_iterator) - | 
 | 3336 | 	       offsetof(struct trace_iterator, seq)); | 
| Steven Rostedt | 4823ed7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3337 | 	iter->pos = -1; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3338 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3339 | 	trace_event_read_lock(); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3340 | 	trace_access_lock(iter->cpu_file); | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 3341 | 	while (trace_find_next_entry_inc(iter) != NULL) { | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 3342 | 		enum print_line_t ret; | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 3343 | 		int len = iter->seq.len; | 
 | 3344 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 3345 | 		ret = print_trace_line(iter); | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 3346 | 		if (ret == TRACE_TYPE_PARTIAL_LINE) { | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 3347 | 			/* don't print partial lines */ | 
 | 3348 | 			iter->seq.len = len; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3349 | 			break; | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 3350 | 		} | 
| Frederic Weisbecker | b91facc | 2009-02-06 18:30:44 +0100 | [diff] [blame] | 3351 | 		if (ret != TRACE_TYPE_NO_CONSUME) | 
 | 3352 | 			trace_consume(iter); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3353 |  | 
 | 3354 | 		if (iter->seq.len >= cnt) | 
 | 3355 | 			break; | 
| Jiri Olsa | ee5e51f | 2011-03-25 12:05:18 +0100 | [diff] [blame] | 3356 |  | 
 | 3357 | 		/* | 
 | 3358 | 		 * Setting the full flag means we reached the trace_seq buffer | 
 | 3359 | 		 * size and should have left via the partial-line check above. | 
 | 3360 | 		 * One of the trace_seq_* functions was not used properly. | 
 | 3361 | 		 */ | 
 | 3362 | 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d", | 
 | 3363 | 			  iter->ent->type); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3364 | 	} | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3365 | 	trace_access_unlock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3366 | 	trace_event_read_unlock(); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3367 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3368 | 	/* Now copy what we have to the user */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3369 | 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 
 | 3370 | 	if (iter->seq.readpos >= iter->seq.len) | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 3371 | 		trace_seq_init(&iter->seq); | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3372 |  | 
 | 3373 | 	/* | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3374 | 	 * If there was nothing to send to the user, in spite of consuming | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3375 | 	 * trace entries, go back and wait for more entries. | 
 | 3376 | 	 */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3377 | 	if (sret == -EBUSY) | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 3378 | 		goto waitagain; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3379 |  | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3380 | out: | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3381 | 	mutex_unlock(&iter->mutex); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 3382 |  | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 3383 | 	return sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3384 | } | 
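/*
 * As the "Consumer reader" comment above says, reads here consume the
 * entries they return, so two readers of trace_pipe see disjoint
 * events. Typical (illustrative) use:
 *
 *   # cat /sys/kernel/debug/tracing/trace_pipe > /tmp/pipe.txt
 */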
 | 3385 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3386 | static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, | 
 | 3387 | 				     struct pipe_buffer *buf) | 
 | 3388 | { | 
 | 3389 | 	__free_page(buf->page); | 
 | 3390 | } | 
 | 3391 |  | 
 | 3392 | static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | 
 | 3393 | 				     unsigned int idx) | 
 | 3394 | { | 
 | 3395 | 	__free_page(spd->pages[idx]); | 
 | 3396 | } | 
 | 3397 |  | 
| Alexey Dobriyan | 28dfef8 | 2009-12-15 16:46:48 -0800 | [diff] [blame] | 3398 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3399 | 	.can_merge		= 0, | 
 | 3400 | 	.map			= generic_pipe_buf_map, | 
 | 3401 | 	.unmap			= generic_pipe_buf_unmap, | 
 | 3402 | 	.confirm		= generic_pipe_buf_confirm, | 
 | 3403 | 	.release		= tracing_pipe_buf_release, | 
 | 3404 | 	.steal			= generic_pipe_buf_steal, | 
 | 3405 | 	.get			= generic_pipe_buf_get, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3406 | }; | 
 | 3407 |  | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3408 | static size_t | 
| Frederic Weisbecker | fa7c7f6 | 2009-02-11 02:51:30 +0100 | [diff] [blame] | 3409 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3410 | { | 
 | 3411 | 	size_t count; | 
 | 3412 | 	int ret; | 
 | 3413 |  | 
 | 3414 | 	/* Seq buffer is page-sized, exactly what we need. */ | 
 | 3415 | 	for (;;) { | 
 | 3416 | 		count = iter->seq.len; | 
 | 3417 | 		ret = print_trace_line(iter); | 
 | 3418 | 		count = iter->seq.len - count; | 
 | 3419 | 		if (rem < count) { | 
 | 3420 | 			rem = 0; | 
 | 3421 | 			iter->seq.len -= count; | 
 | 3422 | 			break; | 
 | 3423 | 		} | 
 | 3424 | 		if (ret == TRACE_TYPE_PARTIAL_LINE) { | 
 | 3425 | 			iter->seq.len -= count; | 
 | 3426 | 			break; | 
 | 3427 | 		} | 
 | 3428 |  | 
| Lai Jiangshan | 74e7ff8 | 2009-07-28 20:17:22 +0800 | [diff] [blame] | 3429 | 		if (ret != TRACE_TYPE_NO_CONSUME) | 
 | 3430 | 			trace_consume(iter); | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3431 | 		rem -= count; | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 3432 | 		if (!trace_find_next_entry_inc(iter)) { | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3433 | 			rem = 0; | 
 | 3434 | 			iter->ent = NULL; | 
 | 3435 | 			break; | 
 | 3436 | 		} | 
 | 3437 | 	} | 
 | 3438 |  | 
 | 3439 | 	return rem; | 
 | 3440 | } | 
 | 3441 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3442 | static ssize_t tracing_splice_read_pipe(struct file *filp, | 
 | 3443 | 					loff_t *ppos, | 
 | 3444 | 					struct pipe_inode_info *pipe, | 
 | 3445 | 					size_t len, | 
 | 3446 | 					unsigned int flags) | 
 | 3447 | { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3448 | 	struct page *pages_def[PIPE_DEF_BUFFERS]; | 
 | 3449 | 	struct partial_page partial_def[PIPE_DEF_BUFFERS]; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3450 | 	struct trace_iterator *iter = filp->private_data; | 
 | 3451 | 	struct splice_pipe_desc spd = { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3452 | 		.pages		= pages_def, | 
 | 3453 | 		.partial	= partial_def, | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3454 | 		.nr_pages	= 0, /* This gets updated below. */ | 
 | 3455 | 		.flags		= flags, | 
 | 3456 | 		.ops		= &tracing_pipe_buf_ops, | 
 | 3457 | 		.spd_release	= tracing_spd_release_pipe, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3458 | 	}; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3459 | 	static struct tracer *old_tracer; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3460 | 	ssize_t ret; | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3461 | 	size_t rem; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3462 | 	unsigned int i; | 
 | 3463 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3464 | 	if (splice_grow_spd(pipe, &spd)) | 
 | 3465 | 		return -ENOMEM; | 
 | 3466 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3467 | 	/* copy the tracer to avoid using a global lock all around */ | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3468 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3469 | 	if (unlikely(old_tracer != current_trace && current_trace)) { | 
 | 3470 | 		old_tracer = current_trace; | 
 | 3471 | 		*iter->trace = *current_trace; | 
 | 3472 | 	} | 
 | 3473 | 	mutex_unlock(&trace_types_lock); | 
 | 3474 |  | 
 | 3475 | 	mutex_lock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3476 |  | 
 | 3477 | 	if (iter->trace->splice_read) { | 
 | 3478 | 		ret = iter->trace->splice_read(iter, filp, | 
 | 3479 | 					       ppos, pipe, len, flags); | 
 | 3480 | 		if (ret) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3481 | 			goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3482 | 	} | 
 | 3483 |  | 
 | 3484 | 	ret = tracing_wait_pipe(filp); | 
 | 3485 | 	if (ret <= 0) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3486 | 		goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3487 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 3488 | 	if (!iter->ent && !trace_find_next_entry_inc(iter)) { | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3489 | 		ret = -EFAULT; | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3490 | 		goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3491 | 	} | 
 | 3492 |  | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3493 | 	trace_event_read_lock(); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3494 | 	trace_access_lock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3495 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3496 | 	/* Fill as many pages as possible. */ | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3497 | 	for (i = 0, rem = len; i < pipe->buffers && rem; i++) { | 
 | 3498 | 		spd.pages[i] = alloc_page(GFP_KERNEL); | 
 | 3499 | 		if (!spd.pages[i]) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3500 | 			break; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3501 |  | 
| Frederic Weisbecker | fa7c7f6 | 2009-02-11 02:51:30 +0100 | [diff] [blame] | 3502 | 		rem = tracing_fill_pipe_page(rem, iter); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3503 |  | 
 | 3504 | 		/* Copy the data into the page, so we can start over. */ | 
 | 3505 | 		ret = trace_seq_to_buffer(&iter->seq, | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3506 | 					  page_address(spd.pages[i]), | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3507 | 					  iter->seq.len); | 
 | 3508 | 		if (ret < 0) { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3509 | 			__free_page(spd.pages[i]); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3510 | 			break; | 
 | 3511 | 		} | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3512 | 		spd.partial[i].offset = 0; | 
 | 3513 | 		spd.partial[i].len = iter->seq.len; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3514 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 3515 | 		trace_seq_init(&iter->seq); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3516 | 	} | 
 | 3517 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3518 | 	trace_access_unlock(iter->cpu_file); | 
| Lai Jiangshan | 4f53596 | 2009-05-18 19:35:34 +0800 | [diff] [blame] | 3519 | 	trace_event_read_unlock(); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3520 | 	mutex_unlock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3521 |  | 
 | 3522 | 	spd.nr_pages = i; | 
 | 3523 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3524 | 	ret = splice_to_pipe(pipe, &spd); | 
 | 3525 | out: | 
 | 3526 | 	splice_shrink_spd(pipe, &spd); | 
 | 3527 | 	return ret; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3528 |  | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3529 | out_err: | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3530 | 	mutex_unlock(&iter->mutex); | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 3531 | 	goto out; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3532 | } | 
 | 3533 |  | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3534 | static ssize_t | 
 | 3535 | tracing_entries_read(struct file *filp, char __user *ubuf, | 
 | 3536 | 		     size_t cnt, loff_t *ppos) | 
 | 3537 | { | 
 | 3538 | 	struct trace_array *tr = filp->private_data; | 
| Steven Rostedt | db526ca | 2009-03-12 13:53:25 -0400 | [diff] [blame] | 3539 | 	char buf[96]; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3540 | 	int r; | 
 | 3541 |  | 
| Steven Rostedt | db526ca | 2009-03-12 13:53:25 -0400 | [diff] [blame] | 3542 | 	mutex_lock(&trace_types_lock); | 
 | 3543 | 	if (!ring_buffer_expanded) | 
 | 3544 | 		r = sprintf(buf, "%lu (expanded: %lu)\n", | 
 | 3545 | 			    tr->entries >> 10, | 
 | 3546 | 			    trace_buf_size >> 10); | 
 | 3547 | 	else | 
 | 3548 | 		r = sprintf(buf, "%lu\n", tr->entries >> 10); | 
 | 3549 | 	mutex_unlock(&trace_types_lock); | 
 | 3550 |  | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3551 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
 | 3552 | } | 
 | 3553 |  | 
 | 3554 | static ssize_t | 
 | 3555 | tracing_entries_write(struct file *filp, const char __user *ubuf, | 
 | 3556 | 		      size_t cnt, loff_t *ppos) | 
 | 3557 | { | 
 | 3558 | 	unsigned long val; | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3559 | 	int ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3560 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 3561 | 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
 | 3562 | 	if (ret) | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3563 | 		return ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3564 |  | 
 | 3565 | 	/* must have at least 1 entry */ | 
 | 3566 | 	if (!val) | 
 | 3567 | 		return -EINVAL; | 
 | 3568 |  | 
| Steven Rostedt | 1696b2b | 2008-11-13 00:09:35 -0500 | [diff] [blame] | 3569 | 	/* value is in KB */ | 
 | 3570 | 	val <<= 10; | 
 | 3571 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3572 | 	ret = tracing_resize_ring_buffer(val); | 
 | 3573 | 	if (ret < 0) | 
 | 3574 | 		return ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3575 |  | 
| Jiri Olsa | cf8517c | 2009-10-23 19:36:16 -0400 | [diff] [blame] | 3576 | 	*ppos += cnt; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3577 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3578 | 	return cnt; | 
 | 3579 | } | 
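/*
 * The value written is taken in KB (note the "val <<= 10" above);
 * e.g. to ask for 1 MB ring buffers via the buffer_size_kb file:
 *
 *   # echo 1024 > /sys/kernel/debug/tracing/buffer_size_kb
 */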
| Steven Rostedt | bf5e651 | 2008-11-10 21:46:00 -0500 | [diff] [blame] | 3580 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3581 | static ssize_t | 
| Vaibhav Nagarnaik | f81ab07 | 2011-08-16 14:46:15 -0700 | [diff] [blame] | 3582 | tracing_total_entries_read(struct file *filp, char __user *ubuf, | 
 | 3583 | 				size_t cnt, loff_t *ppos) | 
 | 3584 | { | 
 | 3585 | 	struct trace_array *tr = filp->private_data; | 
 | 3586 | 	char buf[64]; | 
 | 3587 | 	int r, cpu; | 
 | 3588 | 	unsigned long size = 0, expanded_size = 0; | 
 | 3589 |  | 
 | 3590 | 	mutex_lock(&trace_types_lock); | 
 | 3591 | 	for_each_tracing_cpu(cpu) { | 
 | 3592 | 		size += tr->entries >> 10; | 
 | 3593 | 		if (!ring_buffer_expanded) | 
 | 3594 | 			expanded_size += trace_buf_size >> 10; | 
 | 3595 | 	} | 
 | 3596 | 	if (ring_buffer_expanded) | 
 | 3597 | 		r = sprintf(buf, "%lu\n", size); | 
 | 3598 | 	else | 
 | 3599 | 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); | 
 | 3600 | 	mutex_unlock(&trace_types_lock); | 
 | 3601 |  | 
 | 3602 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
 | 3603 | } | 
 | 3604 |  | 
 | 3605 | static ssize_t | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3606 | tracing_free_buffer_write(struct file *filp, const char __user *ubuf, | 
 | 3607 | 			  size_t cnt, loff_t *ppos) | 
 | 3608 | { | 
 | 3609 | 	/* | 
 | 3610 | 	 * There is no need to read what the user has written; this function | 
 | 3611 | 	 * only exists so that an "echo" into the file does not return an error | 
 | 3612 | 	 */ | 
 | 3613 |  | 
 | 3614 | 	*ppos += cnt; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3615 |  | 
 | 3616 | 	return cnt; | 
 | 3617 | } | 
 | 3618 |  | 
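/*
 * The "free_buffer" file pairs the no-op write above with the release
 * handler below: any write succeeds, and it is the final close() of the
 * file that optionally turns tracing off (when the stop-on-free option
 * is set) and then shrinks the ring buffer to zero entries.
 */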
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3619 | static int | 
 | 3620 | tracing_free_buffer_release(struct inode *inode, struct file *filp) | 
 | 3621 | { | 
| Steven Rostedt | cf30cf6 | 2011-06-14 22:44:07 -0400 | [diff] [blame] | 3622 | 	/* disable tracing? */ | 
 | 3623 | 	if (trace_flags & TRACE_ITER_STOP_ON_FREE) | 
 | 3624 | 		tracing_off(); | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3625 | 	/* resize the ring buffer to 0 */ | 
 | 3626 | 	tracing_resize_ring_buffer(0); | 
 | 3627 |  | 
 | 3628 | 	return 0; | 
 | 3629 | } | 
 | 3630 |  | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3631 | static ssize_t | 
 | 3632 | tracing_mark_write(struct file *filp, const char __user *ubuf, | 
 | 3633 | 					size_t cnt, loff_t *fpos) | 
 | 3634 | { | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3635 | 	unsigned long addr = (unsigned long)ubuf; | 
 | 3636 | 	struct ring_buffer_event *event; | 
 | 3637 | 	struct ring_buffer *buffer; | 
 | 3638 | 	struct print_entry *entry; | 
 | 3639 | 	unsigned long irq_flags; | 
 | 3640 | 	struct page *pages[2]; | 
 | 3641 | 	int nr_pages = 1; | 
 | 3642 | 	ssize_t written; | 
 | 3643 | 	void *page1; | 
 | 3644 | 	void *page2; | 
 | 3645 | 	int offset; | 
 | 3646 | 	int size; | 
 | 3647 | 	int len; | 
 | 3648 | 	int ret; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3649 |  | 
| Steven Rostedt | c76f069 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3650 | 	if (tracing_disabled) | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3651 | 		return -EINVAL; | 
 | 3652 |  | 
 | 3653 | 	if (cnt > TRACE_BUF_SIZE) | 
 | 3654 | 		cnt = TRACE_BUF_SIZE; | 
 | 3655 |  | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3656 | 	/* | 
 | 3657 | 	 * Userspace is injecting traces into the kernel trace buffer. | 
 | 3658 | 	 * We want to be as non-intrusive as possible. | 
 | 3659 | 	 * To do so, we do not want to allocate any special buffers | 
 | 3660 | 	 * or take any locks, but instead write the userspace data | 
 | 3661 | 	 * straight into the ring buffer. | 
 | 3662 | 	 * | 
 | 3663 | 	 * First we need to pin the userspace buffer into memory, | 
 | 3664 | 	 * which it most likely already is, because userspace just referenced | 
 | 3665 | 	 * it, though there is no guarantee. By using get_user_pages_fast() | 
 | 3666 | 	 * and kmap_atomic/kunmap_atomic() we can get access to the | 
 | 3667 | 	 * pages directly. We then write the data directly into the | 
 | 3668 | 	 * ring buffer. | 
 | 3669 | 	 */ | 
 | 3670 | 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3671 |  | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3672 | 	/* check if we cross pages */ | 
 | 3673 | 	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) | 
 | 3674 | 		nr_pages = 2; | 
 | 3675 |  | 
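	/*
	 * Worked example with 4 KB pages: a 64-byte write starting at
	 * addr = 0x2000ff0 ends at 0x2001030, so the masked addresses
	 * above differ and nr_pages is 2.  Below, offset becomes 0xff0,
	 * and the copy later takes 16 bytes from the first page and 48
	 * from the second.
	 */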
 | 3676 | 	offset = addr & (PAGE_SIZE - 1); | 
 | 3677 | 	addr &= PAGE_MASK; | 
 | 3678 |  | 
 | 3679 | 	ret = get_user_pages_fast(addr, nr_pages, 0, pages); | 
 | 3680 | 	if (ret < nr_pages) { | 
 | 3681 | 		while (--ret >= 0) | 
 | 3682 | 			put_page(pages[ret]); | 
 | 3683 | 		written = -EFAULT; | 
 | 3684 | 		goto out; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3685 | 	} | 
 | 3686 |  | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3687 | 	page1 = kmap_atomic(pages[0]); | 
 | 3688 | 	if (nr_pages == 2) | 
 | 3689 | 		page2 = kmap_atomic(pages[1]); | 
 | 3690 |  | 
 | 3691 | 	local_save_flags(irq_flags); | 
 | 3692 | 	size = sizeof(*entry) + cnt + 2; /* possible \n added */ | 
 | 3693 | 	buffer = global_trace.buffer; | 
 | 3694 | 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 
 | 3695 | 					  irq_flags, preempt_count()); | 
 | 3696 | 	if (!event) { | 
 | 3697 | 		/* Ring buffer disabled, return as if not open for write */ | 
 | 3698 | 		written = -EBADF; | 
 | 3699 | 		goto out_unlock; | 
 | 3700 | 	} | 
 | 3701 |  | 
 | 3702 | 	entry = ring_buffer_event_data(event); | 
 | 3703 | 	entry->ip = _THIS_IP_; | 
 | 3704 |  | 
 | 3705 | 	if (nr_pages == 2) { | 
 | 3706 | 		len = PAGE_SIZE - offset; | 
 | 3707 | 		memcpy(&entry->buf, page1 + offset, len); | 
 | 3708 | 		memcpy(&entry->buf[len], page2, cnt - len); | 
 | 3709 | 	} else | 
 | 3710 | 		memcpy(&entry->buf, page1 + offset, cnt); | 
 | 3711 |  | 
 | 3712 | 	if (entry->buf[cnt - 1] != '\n') { | 
 | 3713 | 		entry->buf[cnt] = '\n'; | 
 | 3714 | 		entry->buf[cnt + 1] = '\0'; | 
 | 3715 | 	} else | 
 | 3716 | 		entry->buf[cnt] = '\0'; | 
 | 3717 |  | 
 | 3718 | 	ring_buffer_unlock_commit(buffer, event); | 
 | 3719 |  | 
 | 3720 | 	written = cnt; | 
 | 3721 |  | 
| Marcin Slusarz | 1aa54bc | 2010-07-28 01:18:01 +0200 | [diff] [blame] | 3722 | 	*fpos += written; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3723 |  | 
| Steven Rostedt | d696b58 | 2011-09-22 11:50:27 -0400 | [diff] [blame] | 3724 |  out_unlock: | 
 | 3725 | 	if (nr_pages == 2) | 
 | 3726 | 		kunmap_atomic(page2); | 
 | 3727 | 	kunmap_atomic(page1); | 
 | 3728 | 	while (nr_pages > 0) | 
 | 3729 | 		put_page(pages[--nr_pages]); | 
 | 3730 |  out: | 
| Marcin Slusarz | 1aa54bc | 2010-07-28 01:18:01 +0200 | [diff] [blame] | 3731 | 	return written; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3732 | } | 
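/*
 * A minimal userspace caller for the function above (sketch; the debugfs
 * mount point is an assumption):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void trace_mark(const char *msg)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, msg, strlen(msg));
 *			close(fd);
 *		}
 *	}
 *
 * Writes are clamped to TRACE_BUF_SIZE and a newline is appended when the
 * message does not already end with one.
 */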
 | 3733 |  | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 3734 | static int tracing_clock_show(struct seq_file *m, void *v) | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 3735 | { | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 3736 | 	int i; | 
 | 3737 |  | 
 | 3738 | 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 3739 | 		seq_printf(m, | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 3740 | 			"%s%s%s%s", i ? " " : "", | 
 | 3741 | 			i == trace_clock_id ? "[" : "", trace_clocks[i].name, | 
 | 3742 | 			i == trace_clock_id ? "]" : ""); | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 3743 | 	seq_putc(m, '\n'); | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 3744 |  | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 3745 | 	return 0; | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 3746 | } | 
 | 3747 |  | 
 | 3748 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 
 | 3749 | 				   size_t cnt, loff_t *fpos) | 
 | 3750 | { | 
 | 3751 | 	char buf[64]; | 
 | 3752 | 	const char *clockstr; | 
 | 3753 | 	int i; | 
 | 3754 |  | 
 | 3755 | 	if (cnt >= sizeof(buf)) | 
 | 3756 | 		return -EINVAL; | 
 | 3757 |  | 
 | 3758 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 3759 | 		return -EFAULT; | 
 | 3760 |  | 
 | 3761 | 	buf[cnt] = 0; | 
 | 3762 |  | 
 | 3763 | 	clockstr = strstrip(buf); | 
 | 3764 |  | 
 | 3765 | 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { | 
 | 3766 | 		if (strcmp(trace_clocks[i].name, clockstr) == 0) | 
 | 3767 | 			break; | 
 | 3768 | 	} | 
 | 3769 | 	if (i == ARRAY_SIZE(trace_clocks)) | 
 | 3770 | 		return -EINVAL; | 
 | 3771 |  | 
 | 3772 | 	trace_clock_id = i; | 
 | 3773 |  | 
 | 3774 | 	mutex_lock(&trace_types_lock); | 
 | 3775 |  | 
 | 3776 | 	ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); | 
 | 3777 | 	if (max_tr.buffer) | 
 | 3778 | 		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); | 
 | 3779 |  | 
 | 3780 | 	mutex_unlock(&trace_types_lock); | 
 | 3781 |  | 
 | 3782 | 	*fpos += cnt; | 
 | 3783 |  | 
 | 3784 | 	return cnt; | 
 | 3785 | } | 
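/*
 * The clock is selected by name: writing one of the strings listed in
 * trace_clocks[] (for example "local" or "global") switches both the live
 * buffer and max_tr to that clock; any string that does not match a known
 * clock name is rejected with -EINVAL.
 */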
 | 3786 |  | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 3787 | static int tracing_clock_open(struct inode *inode, struct file *file) | 
 | 3788 | { | 
 | 3789 | 	if (tracing_disabled) | 
 | 3790 | 		return -ENODEV; | 
 | 3791 | 	return single_open(file, tracing_clock_show, NULL); | 
 | 3792 | } | 
 | 3793 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3794 | static const struct file_operations tracing_max_lat_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3795 | 	.open		= tracing_open_generic, | 
 | 3796 | 	.read		= tracing_max_lat_read, | 
 | 3797 | 	.write		= tracing_max_lat_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3798 | 	.llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3799 | }; | 
 | 3800 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3801 | static const struct file_operations tracing_ctrl_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3802 | 	.open		= tracing_open_generic, | 
 | 3803 | 	.read		= tracing_ctrl_read, | 
 | 3804 | 	.write		= tracing_ctrl_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3805 | 	.llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3806 | }; | 
 | 3807 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3808 | static const struct file_operations set_tracer_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3809 | 	.open		= tracing_open_generic, | 
 | 3810 | 	.read		= tracing_set_trace_read, | 
 | 3811 | 	.write		= tracing_set_trace_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3812 | 	.llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3813 | }; | 
 | 3814 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3815 | static const struct file_operations tracing_pipe_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3816 | 	.open		= tracing_open_pipe, | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 3817 | 	.poll		= tracing_poll_pipe, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3818 | 	.read		= tracing_read_pipe, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3819 | 	.splice_read	= tracing_splice_read_pipe, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3820 | 	.release	= tracing_release_pipe, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3821 | 	.llseek		= no_llseek, | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3822 | }; | 
 | 3823 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3824 | static const struct file_operations tracing_entries_fops = { | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3825 | 	.open		= tracing_open_generic, | 
 | 3826 | 	.read		= tracing_entries_read, | 
 | 3827 | 	.write		= tracing_entries_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3828 | 	.llseek		= generic_file_llseek, | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3829 | }; | 
 | 3830 |  | 
| Vaibhav Nagarnaik | f81ab07 | 2011-08-16 14:46:15 -0700 | [diff] [blame] | 3831 | static const struct file_operations tracing_total_entries_fops = { | 
 | 3832 | 	.open		= tracing_open_generic, | 
 | 3833 | 	.read		= tracing_total_entries_read, | 
 | 3834 | 	.llseek		= generic_file_llseek, | 
 | 3835 | }; | 
 | 3836 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 3837 | static const struct file_operations tracing_free_buffer_fops = { | 
 | 3838 | 	.write		= tracing_free_buffer_write, | 
 | 3839 | 	.release	= tracing_free_buffer_release, | 
 | 3840 | }; | 
 | 3841 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3842 | static const struct file_operations tracing_mark_fops = { | 
| Frédéric Weisbecker | 43a1538 | 2008-09-21 20:16:30 +0200 | [diff] [blame] | 3843 | 	.open		= tracing_open_generic, | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3844 | 	.write		= tracing_mark_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 3845 | 	.llseek		= generic_file_llseek, | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3846 | }; | 
 | 3847 |  | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 3848 | static const struct file_operations trace_clock_fops = { | 
| Li Zefan | 13f16d2 | 2009-12-08 11:16:11 +0800 | [diff] [blame] | 3849 | 	.open		= tracing_clock_open, | 
 | 3850 | 	.read		= seq_read, | 
 | 3851 | 	.llseek		= seq_lseek, | 
 | 3852 | 	.release	= single_release, | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 3853 | 	.write		= tracing_clock_write, | 
 | 3854 | }; | 
 | 3855 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3856 | struct ftrace_buffer_info { | 
 | 3857 | 	struct trace_array	*tr; | 
 | 3858 | 	void			*spare; | 
 | 3859 | 	int			cpu; | 
 | 3860 | 	unsigned int		read; | 
 | 3861 | }; | 
 | 3862 |  | 
 | 3863 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | 
 | 3864 | { | 
 | 3865 | 	int cpu = (int)(long)inode->i_private; | 
 | 3866 | 	struct ftrace_buffer_info *info; | 
 | 3867 |  | 
 | 3868 | 	if (tracing_disabled) | 
 | 3869 | 		return -ENODEV; | 
 | 3870 |  | 
 | 3871 | 	info = kzalloc(sizeof(*info), GFP_KERNEL); | 
 | 3872 | 	if (!info) | 
 | 3873 | 		return -ENOMEM; | 
 | 3874 |  | 
 | 3875 | 	info->tr	= &global_trace; | 
 | 3876 | 	info->cpu	= cpu; | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 3877 | 	info->spare	= NULL; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3878 | 	/* Force reading ring buffer for first read */ | 
 | 3879 | 	info->read	= (unsigned int)-1; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3880 |  | 
 | 3881 | 	filp->private_data = info; | 
 | 3882 |  | 
| Lai Jiangshan | d1e7e02 | 2009-04-02 15:16:56 +0800 | [diff] [blame] | 3883 | 	return nonseekable_open(inode, filp); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3884 | } | 
 | 3885 |  | 
 | 3886 | static ssize_t | 
 | 3887 | tracing_buffers_read(struct file *filp, char __user *ubuf, | 
 | 3888 | 		     size_t count, loff_t *ppos) | 
 | 3889 | { | 
 | 3890 | 	struct ftrace_buffer_info *info = filp->private_data; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3891 | 	ssize_t ret; | 
 | 3892 | 	size_t size; | 
 | 3893 |  | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 3894 | 	if (!count) | 
 | 3895 | 		return 0; | 
 | 3896 |  | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 3897 | 	if (!info->spare) | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 3898 | 		info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu); | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 3899 | 	if (!info->spare) | 
 | 3900 | 		return -ENOMEM; | 
 | 3901 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3902 | 	/* Do we have previous read data to read? */ | 
 | 3903 | 	if (info->read < PAGE_SIZE) | 
 | 3904 | 		goto read; | 
 | 3905 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3906 | 	trace_access_lock(info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3907 | 	ret = ring_buffer_read_page(info->tr->buffer, | 
 | 3908 | 				    &info->spare, | 
 | 3909 | 				    count, | 
 | 3910 | 				    info->cpu, 0); | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 3911 | 	trace_access_unlock(info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3912 | 	if (ret < 0) | 
 | 3913 | 		return 0; | 
 | 3914 |  | 
| Steven Rostedt | 436fc28 | 2011-10-14 10:44:25 -0400 | [diff] [blame] | 3915 | 	info->read = 0; | 
 | 3916 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3917 | read: | 
 | 3918 | 	size = PAGE_SIZE - info->read; | 
 | 3919 | 	if (size > count) | 
 | 3920 | 		size = count; | 
 | 3921 |  | 
 | 3922 | 	ret = copy_to_user(ubuf, info->spare + info->read, size); | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 3923 | 	if (ret == size) | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3924 | 		return -EFAULT; | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 3925 | 	size -= ret; | 
 | 3926 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3927 | 	*ppos += size; | 
 | 3928 | 	info->read += size; | 
 | 3929 |  | 
 | 3930 | 	return size; | 
 | 3931 | } | 
 | 3932 |  | 
 | 3933 | static int tracing_buffers_release(struct inode *inode, struct file *file) | 
 | 3934 | { | 
 | 3935 | 	struct ftrace_buffer_info *info = file->private_data; | 
 | 3936 |  | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 3937 | 	if (info->spare) | 
 | 3938 | 		ring_buffer_free_read_page(info->tr->buffer, info->spare); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3939 | 	kfree(info); | 
 | 3940 |  | 
 | 3941 | 	return 0; | 
 | 3942 | } | 
 | 3943 |  | 
 | 3944 | struct buffer_ref { | 
 | 3945 | 	struct ring_buffer	*buffer; | 
 | 3946 | 	void			*page; | 
 | 3947 | 	int			ref; | 
 | 3948 | }; | 
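/*
 * Each page handed to the pipe carries a buffer_ref: the count starts at 1
 * when the page is read out of the ring buffer for splicing, is bumped by
 * buffer_pipe_buf_get() while the pipe holds it, and the page is handed
 * back via ring_buffer_free_read_page() only when the last reference is
 * dropped in the release callbacks below.
 */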
 | 3949 |  | 
 | 3950 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | 
 | 3951 | 				    struct pipe_buffer *buf) | 
 | 3952 | { | 
 | 3953 | 	struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 
 | 3954 |  | 
 | 3955 | 	if (--ref->ref) | 
 | 3956 | 		return; | 
 | 3957 |  | 
 | 3958 | 	ring_buffer_free_read_page(ref->buffer, ref->page); | 
 | 3959 | 	kfree(ref); | 
 | 3960 | 	buf->private = 0; | 
 | 3961 | } | 
 | 3962 |  | 
 | 3963 | static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe, | 
 | 3964 | 				 struct pipe_buffer *buf) | 
 | 3965 | { | 
 | 3966 | 	return 1; | 
 | 3967 | } | 
 | 3968 |  | 
 | 3969 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | 
 | 3970 | 				struct pipe_buffer *buf) | 
 | 3971 | { | 
 | 3972 | 	struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 
 | 3973 |  | 
 | 3974 | 	ref->ref++; | 
 | 3975 | } | 
 | 3976 |  | 
 | 3977 | /* Pipe buffer operations for a ring buffer page. */ | 
| Alexey Dobriyan | 28dfef8 | 2009-12-15 16:46:48 -0800 | [diff] [blame] | 3978 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3979 | 	.can_merge		= 0, | 
 | 3980 | 	.map			= generic_pipe_buf_map, | 
 | 3981 | 	.unmap			= generic_pipe_buf_unmap, | 
 | 3982 | 	.confirm		= generic_pipe_buf_confirm, | 
 | 3983 | 	.release		= buffer_pipe_buf_release, | 
 | 3984 | 	.steal			= buffer_pipe_buf_steal, | 
 | 3985 | 	.get			= buffer_pipe_buf_get, | 
 | 3986 | }; | 
 | 3987 |  | 
 | 3988 | /* | 
 | 3989 |  * Callback from splice_to_pipe(): release the pages still referenced | 
 | 3990 |  * by the spd if we errored out while filling the pipe. | 
 | 3991 |  */ | 
 | 3992 | static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | 
 | 3993 | { | 
 | 3994 | 	struct buffer_ref *ref = | 
 | 3995 | 		(struct buffer_ref *)spd->partial[i].private; | 
 | 3996 |  | 
 | 3997 | 	if (--ref->ref) | 
 | 3998 | 		return; | 
 | 3999 |  | 
 | 4000 | 	ring_buffer_free_read_page(ref->buffer, ref->page); | 
 | 4001 | 	kfree(ref); | 
 | 4002 | 	spd->partial[i].private = 0; | 
 | 4003 | } | 
 | 4004 |  | 
 | 4005 | static ssize_t | 
 | 4006 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, | 
 | 4007 | 			    struct pipe_inode_info *pipe, size_t len, | 
 | 4008 | 			    unsigned int flags) | 
 | 4009 | { | 
 | 4010 | 	struct ftrace_buffer_info *info = file->private_data; | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4011 | 	struct partial_page partial_def[PIPE_DEF_BUFFERS]; | 
 | 4012 | 	struct page *pages_def[PIPE_DEF_BUFFERS]; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4013 | 	struct splice_pipe_desc spd = { | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4014 | 		.pages		= pages_def, | 
 | 4015 | 		.partial	= partial_def, | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4016 | 		.flags		= flags, | 
 | 4017 | 		.ops		= &buffer_pipe_buf_ops, | 
 | 4018 | 		.spd_release	= buffer_spd_release, | 
 | 4019 | 	}; | 
 | 4020 | 	struct buffer_ref *ref; | 
| Steven Rostedt | 93459c6 | 2009-04-29 00:23:13 -0400 | [diff] [blame] | 4021 | 	int entries, size, i; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4022 | 	size_t ret; | 
 | 4023 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4024 | 	if (splice_grow_spd(pipe, &spd)) | 
 | 4025 | 		return -ENOMEM; | 
 | 4026 |  | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4027 | 	if (*ppos & (PAGE_SIZE - 1)) { | 
 | 4028 | 		WARN_ONCE(1, "Ftrace: previous read must page-align\n"); | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4029 | 		ret = -EINVAL; | 
 | 4030 | 		goto out; | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4031 | 	} | 
 | 4032 |  | 
 | 4033 | 	if (len & (PAGE_SIZE - 1)) { | 
 | 4034 | 		WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4035 | 		if (len < PAGE_SIZE) { | 
 | 4036 | 			ret = -EINVAL; | 
 | 4037 | 			goto out; | 
 | 4038 | 		} | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4039 | 		len &= PAGE_MASK; | 
 | 4040 | 	} | 
 | 4041 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 4042 | 	trace_access_lock(info->cpu); | 
| Steven Rostedt | 93459c6 | 2009-04-29 00:23:13 -0400 | [diff] [blame] | 4043 | 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 
 | 4044 |  | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4045 | 	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4046 | 		struct page *page; | 
 | 4047 | 		int r; | 
 | 4048 |  | 
 | 4049 | 		ref = kzalloc(sizeof(*ref), GFP_KERNEL); | 
 | 4050 | 		if (!ref) | 
 | 4051 | 			break; | 
 | 4052 |  | 
| Steven Rostedt | 7267fa6 | 2009-04-29 00:16:21 -0400 | [diff] [blame] | 4053 | 		ref->ref = 1; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4054 | 		ref->buffer = info->tr->buffer; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4055 | 		ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4056 | 		if (!ref->page) { | 
 | 4057 | 			kfree(ref); | 
 | 4058 | 			break; | 
 | 4059 | 		} | 
 | 4060 |  | 
 | 4061 | 		r = ring_buffer_read_page(ref->buffer, &ref->page, | 
| Steven Rostedt | f2957f1 | 2009-04-29 00:26:30 -0400 | [diff] [blame] | 4062 | 					  len, info->cpu, 1); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4063 | 		if (r < 0) { | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4064 | 			ring_buffer_free_read_page(ref->buffer, ref->page); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4065 | 			kfree(ref); | 
 | 4066 | 			break; | 
 | 4067 | 		} | 
 | 4068 |  | 
 | 4069 | 		/* | 
 | 4070 | 		 * Zero out any leftover data; this page is going | 
 | 4071 | 		 * to user land. | 
 | 4072 | 		 */ | 
 | 4073 | 		size = ring_buffer_page_len(ref->page); | 
 | 4074 | 		if (size < PAGE_SIZE) | 
 | 4075 | 			memset(ref->page + size, 0, PAGE_SIZE - size); | 
 | 4076 |  | 
 | 4077 | 		page = virt_to_page(ref->page); | 
 | 4078 |  | 
 | 4079 | 		spd.pages[i] = page; | 
 | 4080 | 		spd.partial[i].len = PAGE_SIZE; | 
 | 4081 | 		spd.partial[i].offset = 0; | 
 | 4082 | 		spd.partial[i].private = (unsigned long)ref; | 
 | 4083 | 		spd.nr_pages++; | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 4084 | 		*ppos += PAGE_SIZE; | 
| Steven Rostedt | 93459c6 | 2009-04-29 00:23:13 -0400 | [diff] [blame] | 4085 |  | 
 | 4086 | 		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4087 | 	} | 
 | 4088 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 4089 | 	trace_access_unlock(info->cpu); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4090 | 	spd.nr_pages = i; | 
 | 4091 |  | 
 | 4092 | 	/* did we read anything? */ | 
 | 4093 | 	if (!spd.nr_pages) { | 
 | 4094 | 		if (flags & SPLICE_F_NONBLOCK) | 
 | 4095 | 			ret = -EAGAIN; | 
 | 4096 | 		else | 
 | 4097 | 			ret = 0; | 
 | 4098 | 		/* TODO: block */ | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4099 | 		goto out; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4100 | 	} | 
 | 4101 |  | 
 | 4102 | 	ret = splice_to_pipe(pipe, &spd); | 
| Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 4103 | 	splice_shrink_spd(pipe, &spd); | 
 | 4104 | out: | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 4105 | 	return ret; | 
 | 4106 | } | 
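/*
 * A rough userspace outline for draining trace_pipe_raw with splice(); the
 * per-cpu path and output file name are assumptions, and error handling is
 * omitted:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2], raw, out;
 *	ssize_t n;
 *
 *	pipe(fds);
 *	raw = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		   O_RDONLY);
 *	out = open("cpu0.bin", O_WRONLY | O_CREAT, 0644);
 *	while ((n = splice(raw, NULL, fds[1], NULL, 4096,
 *			   SPLICE_F_NONBLOCK)) > 0)
 *		splice(fds[0], NULL, out, NULL, n, 0);
 *
 * Lengths are kept page sized, matching the alignment checks above; a real
 * reader would poll and retry instead of stopping on the first empty read.
 */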
 | 4107 |  | 
 | 4108 | static const struct file_operations tracing_buffers_fops = { | 
 | 4109 | 	.open		= tracing_buffers_open, | 
 | 4110 | 	.read		= tracing_buffers_read, | 
 | 4111 | 	.release	= tracing_buffers_release, | 
 | 4112 | 	.splice_read	= tracing_buffers_splice_read, | 
 | 4113 | 	.llseek		= no_llseek, | 
 | 4114 | }; | 
 | 4115 |  | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4116 | static ssize_t | 
 | 4117 | tracing_stats_read(struct file *filp, char __user *ubuf, | 
 | 4118 | 		   size_t count, loff_t *ppos) | 
 | 4119 | { | 
 | 4120 | 	unsigned long cpu = (unsigned long)filp->private_data; | 
 | 4121 | 	struct trace_array *tr = &global_trace; | 
 | 4122 | 	struct trace_seq *s; | 
 | 4123 | 	unsigned long cnt; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4124 | 	unsigned long long t; | 
 | 4125 | 	unsigned long usec_rem; | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4126 |  | 
| Li Zefan | e4f2d10 | 2009-06-15 10:57:28 +0800 | [diff] [blame] | 4127 | 	s = kmalloc(sizeof(*s), GFP_KERNEL); | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4128 | 	if (!s) | 
| Roel Kluin | a646365 | 2009-11-11 22:26:35 +0100 | [diff] [blame] | 4129 | 		return -ENOMEM; | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4130 |  | 
 | 4131 | 	trace_seq_init(s); | 
 | 4132 |  | 
 | 4133 | 	cnt = ring_buffer_entries_cpu(tr->buffer, cpu); | 
 | 4134 | 	trace_seq_printf(s, "entries: %ld\n", cnt); | 
 | 4135 |  | 
 | 4136 | 	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu); | 
 | 4137 | 	trace_seq_printf(s, "overrun: %ld\n", cnt); | 
 | 4138 |  | 
 | 4139 | 	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); | 
 | 4140 | 	trace_seq_printf(s, "commit overrun: %ld\n", cnt); | 
 | 4141 |  | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4142 | 	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu); | 
 | 4143 | 	trace_seq_printf(s, "bytes: %ld\n", cnt); | 
 | 4144 |  | 
 | 4145 | 	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu)); | 
 | 4146 | 	usec_rem = do_div(t, USEC_PER_SEC); | 
 | 4147 | 	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); | 
 | 4148 |  | 
 | 4149 | 	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu)); | 
 | 4150 | 	usec_rem = do_div(t, USEC_PER_SEC); | 
 | 4151 | 	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); | 
 | 4152 |  | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4153 | 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | 
 | 4154 |  | 
 | 4155 | 	kfree(s); | 
 | 4156 |  | 
 | 4157 | 	return count; | 
 | 4158 | } | 
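/*
 * The per-cpu "stats" file built above is plain text, one "name: value"
 * pair per line, in the order of the trace_seq_printf() calls: entries,
 * overrun, commit overrun, bytes, oldest event ts and now ts (the two
 * timestamps formatted as seconds.microseconds).
 */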
 | 4159 |  | 
 | 4160 | static const struct file_operations tracing_stats_fops = { | 
 | 4161 | 	.open		= tracing_open_generic, | 
 | 4162 | 	.read		= tracing_stats_read, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4163 | 	.llseek		= generic_file_llseek, | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4164 | }; | 
 | 4165 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4166 | #ifdef CONFIG_DYNAMIC_FTRACE | 
 | 4167 |  | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4168 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4169 | { | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4170 | 	return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4171 | } | 
 | 4172 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4173 | static ssize_t | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4174 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4175 | 		  size_t cnt, loff_t *ppos) | 
 | 4176 | { | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 4177 | 	static char ftrace_dyn_info_buffer[1024]; | 
 | 4178 | 	static DEFINE_MUTEX(dyn_info_mutex); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4179 | 	unsigned long *p = filp->private_data; | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4180 | 	char *buf = ftrace_dyn_info_buffer; | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 4181 | 	int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4182 | 	int r; | 
 | 4183 |  | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4184 | 	mutex_lock(&dyn_info_mutex); | 
 | 4185 | 	r = sprintf(buf, "%ld ", *p); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4186 |  | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 4187 | 	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4188 | 	buf[r++] = '\n'; | 
 | 4189 |  | 
 | 4190 | 	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
 | 4191 |  | 
 | 4192 | 	mutex_unlock(&dyn_info_mutex); | 
 | 4193 |  | 
 | 4194 | 	return r; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4195 | } | 
 | 4196 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 4197 | static const struct file_operations tracing_dyn_info_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 4198 | 	.open		= tracing_open_generic, | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 4199 | 	.read		= tracing_read_dyn_info, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4200 | 	.llseek		= generic_file_llseek, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4201 | }; | 
 | 4202 | #endif | 
 | 4203 |  | 
 | 4204 | static struct dentry *d_tracer; | 
 | 4205 |  | 
 | 4206 | struct dentry *tracing_init_dentry(void) | 
 | 4207 | { | 
 | 4208 | 	static int once; | 
 | 4209 |  | 
 | 4210 | 	if (d_tracer) | 
 | 4211 | 		return d_tracer; | 
 | 4212 |  | 
| Frederic Weisbecker | 3e1f60b | 2009-03-22 23:10:45 +0100 | [diff] [blame] | 4213 | 	if (!debugfs_initialized()) | 
 | 4214 | 		return NULL; | 
 | 4215 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4216 | 	d_tracer = debugfs_create_dir("tracing", NULL); | 
 | 4217 |  | 
 | 4218 | 	if (!d_tracer && !once) { | 
 | 4219 | 		once = 1; | 
 | 4220 | 		pr_warning("Could not create debugfs directory 'tracing'\n"); | 
 | 4221 | 		return NULL; | 
 | 4222 | 	} | 
 | 4223 |  | 
 | 4224 | 	return d_tracer; | 
 | 4225 | } | 
 | 4226 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4227 | static struct dentry *d_percpu; | 
 | 4228 |  | 
 | 4229 | struct dentry *tracing_dentry_percpu(void) | 
 | 4230 | { | 
 | 4231 | 	static int once; | 
 | 4232 | 	struct dentry *d_tracer; | 
 | 4233 |  | 
 | 4234 | 	if (d_percpu) | 
 | 4235 | 		return d_percpu; | 
 | 4236 |  | 
 | 4237 | 	d_tracer = tracing_init_dentry(); | 
 | 4238 |  | 
 | 4239 | 	if (!d_tracer) | 
 | 4240 | 		return NULL; | 
 | 4241 |  | 
 | 4242 | 	d_percpu = debugfs_create_dir("per_cpu", d_tracer); | 
 | 4243 |  | 
 | 4244 | 	if (!d_percpu && !once) { | 
 | 4245 | 		once = 1; | 
 | 4246 | 		pr_warning("Could not create debugfs directory 'per_cpu'\n"); | 
 | 4247 | 		return NULL; | 
 | 4248 | 	} | 
 | 4249 |  | 
 | 4250 | 	return d_percpu; | 
 | 4251 | } | 
 | 4252 |  | 
 | 4253 | static void tracing_init_debugfs_percpu(long cpu) | 
 | 4254 | { | 
 | 4255 | 	struct dentry *d_percpu = tracing_dentry_percpu(); | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4256 | 	struct dentry *d_cpu; | 
| Steven Rostedt | dd49a38 | 2010-10-20 21:51:26 -0400 | [diff] [blame] | 4257 | 	char cpu_dir[30]; /* 30 characters should be more than enough */ | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4258 |  | 
| Steven Rostedt | dd49a38 | 2010-10-20 21:51:26 -0400 | [diff] [blame] | 4259 | 	snprintf(cpu_dir, 30, "cpu%ld", cpu); | 
| Frederic Weisbecker | 8656e7a | 2009-02-26 00:41:38 +0100 | [diff] [blame] | 4260 | 	d_cpu = debugfs_create_dir(cpu_dir, d_percpu); | 
 | 4261 | 	if (!d_cpu) { | 
 | 4262 | 		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); | 
 | 4263 | 		return; | 
 | 4264 | 	} | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4265 |  | 
| Frederic Weisbecker | 8656e7a | 2009-02-26 00:41:38 +0100 | [diff] [blame] | 4266 | 	/* per cpu trace_pipe */ | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4267 | 	trace_create_file("trace_pipe", 0444, d_cpu, | 
 | 4268 | 			(void *) cpu, &tracing_pipe_fops); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4269 |  | 
 | 4270 | 	/* per cpu trace */ | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4271 | 	trace_create_file("trace", 0644, d_cpu, | 
 | 4272 | 			(void *) cpu, &tracing_fops); | 
| Steven Rostedt | 7f96f93 | 2009-03-13 00:37:42 -0400 | [diff] [blame] | 4273 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4274 | 	trace_create_file("trace_pipe_raw", 0444, d_cpu, | 
 | 4275 | 			(void *) cpu, &tracing_buffers_fops); | 
| Steven Rostedt | c8d7718 | 2009-04-29 18:03:45 -0400 | [diff] [blame] | 4276 |  | 
 | 4277 | 	trace_create_file("stats", 0444, d_cpu, | 
 | 4278 | 			(void *) cpu, &tracing_stats_fops); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4279 | } | 
 | 4280 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 4281 | #ifdef CONFIG_FTRACE_SELFTEST | 
 | 4282 | /* Let selftest have access to static functions in this file */ | 
 | 4283 | #include "trace_selftest.c" | 
 | 4284 | #endif | 
 | 4285 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4286 | struct trace_option_dentry { | 
 | 4287 | 	struct tracer_opt		*opt; | 
 | 4288 | 	struct tracer_flags		*flags; | 
 | 4289 | 	struct dentry			*entry; | 
 | 4290 | }; | 
 | 4291 |  | 
 | 4292 | static ssize_t | 
 | 4293 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, | 
 | 4294 | 			loff_t *ppos) | 
 | 4295 | { | 
 | 4296 | 	struct trace_option_dentry *topt = filp->private_data; | 
 | 4297 | 	char *buf; | 
 | 4298 |  | 
 | 4299 | 	if (topt->flags->val & topt->opt->bit) | 
 | 4300 | 		buf = "1\n"; | 
 | 4301 | 	else | 
 | 4302 | 		buf = "0\n"; | 
 | 4303 |  | 
 | 4304 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 
 | 4305 | } | 
 | 4306 |  | 
 | 4307 | static ssize_t | 
 | 4308 | trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | 
 | 4309 | 			 loff_t *ppos) | 
 | 4310 | { | 
 | 4311 | 	struct trace_option_dentry *topt = filp->private_data; | 
 | 4312 | 	unsigned long val; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4313 | 	int ret; | 
 | 4314 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 4315 | 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
 | 4316 | 	if (ret) | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4317 | 		return ret; | 
 | 4318 |  | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 4319 | 	if (val != 0 && val != 1) | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4320 | 		return -EINVAL; | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 4321 |  | 
 | 4322 | 	if (!!(topt->flags->val & topt->opt->bit) != val) { | 
 | 4323 | 		mutex_lock(&trace_types_lock); | 
 | 4324 | 		ret = __set_tracer_option(current_trace, topt->flags, | 
| Steven Rostedt | c757bea | 2009-12-21 22:35:16 -0500 | [diff] [blame] | 4325 | 					  topt->opt, !val); | 
| Li Zefan | 8d18eaa | 2009-12-08 11:17:06 +0800 | [diff] [blame] | 4326 | 		mutex_unlock(&trace_types_lock); | 
 | 4327 | 		if (ret) | 
 | 4328 | 			return ret; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4329 | 	} | 
 | 4330 |  | 
 | 4331 | 	*ppos += cnt; | 
 | 4332 |  | 
 | 4333 | 	return cnt; | 
 | 4334 | } | 
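/*
 * Per-tracer option files accept only "0" or "1": the value is parsed with
 * kstrtoul_from_user(), and __set_tracer_option() is called under
 * trace_types_lock only when the requested state differs from the current
 * flag bit.
 */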
 | 4335 |  | 
 | 4336 |  | 
 | 4337 | static const struct file_operations trace_options_fops = { | 
 | 4338 | 	.open = tracing_open_generic, | 
 | 4339 | 	.read = trace_options_read, | 
 | 4340 | 	.write = trace_options_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4341 | 	.llseek	= generic_file_llseek, | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4342 | }; | 
 | 4343 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4344 | static ssize_t | 
 | 4345 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, | 
 | 4346 | 			loff_t *ppos) | 
 | 4347 | { | 
 | 4348 | 	long index = (long)filp->private_data; | 
 | 4349 | 	char *buf; | 
 | 4350 |  | 
 | 4351 | 	if (trace_flags & (1 << index)) | 
 | 4352 | 		buf = "1\n"; | 
 | 4353 | 	else | 
 | 4354 | 		buf = "0\n"; | 
 | 4355 |  | 
 | 4356 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 
 | 4357 | } | 
 | 4358 |  | 
 | 4359 | static ssize_t | 
 | 4360 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | 
 | 4361 | 			 loff_t *ppos) | 
 | 4362 | { | 
 | 4363 | 	long index = (long)filp->private_data; | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4364 | 	unsigned long val; | 
 | 4365 | 	int ret; | 
 | 4366 |  | 
| Peter Huewe | 22fe9b5 | 2011-06-07 21:58:27 +0200 | [diff] [blame] | 4367 | 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 
 | 4368 | 	if (ret) | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4369 | 		return ret; | 
 | 4370 |  | 
| Zhaolei | f2d84b6 | 2009-08-07 18:55:48 +0800 | [diff] [blame] | 4371 | 	if (val != 0 && val != 1) | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4372 | 		return -EINVAL; | 
| Zhaolei | f2d84b6 | 2009-08-07 18:55:48 +0800 | [diff] [blame] | 4373 | 	set_tracer_flags(1 << index, val); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4374 |  | 
 | 4375 | 	*ppos += cnt; | 
 | 4376 |  | 
 | 4377 | 	return cnt; | 
 | 4378 | } | 
 | 4379 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4380 | static const struct file_operations trace_options_core_fops = { | 
 | 4381 | 	.open = tracing_open_generic, | 
 | 4382 | 	.read = trace_options_core_read, | 
 | 4383 | 	.write = trace_options_core_write, | 
| Arnd Bergmann | b444786 | 2010-07-07 23:40:11 +0200 | [diff] [blame] | 4384 | 	.llseek = generic_file_llseek, | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4385 | }; | 
 | 4386 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4387 | struct dentry *trace_create_file(const char *name, | 
 | 4388 | 				 mode_t mode, | 
 | 4389 | 				 struct dentry *parent, | 
 | 4390 | 				 void *data, | 
 | 4391 | 				 const struct file_operations *fops) | 
 | 4392 | { | 
 | 4393 | 	struct dentry *ret; | 
 | 4394 |  | 
 | 4395 | 	ret = debugfs_create_file(name, mode, parent, data, fops); | 
 | 4396 | 	if (!ret) | 
 | 4397 | 		pr_warning("Could not create debugfs '%s' entry\n", name); | 
 | 4398 |  | 
 | 4399 | 	return ret; | 
 | 4400 | } | 
 | 4401 |  | 
 | 4402 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4403 | static struct dentry *trace_options_init_dentry(void) | 
 | 4404 | { | 
 | 4405 | 	struct dentry *d_tracer; | 
 | 4406 | 	static struct dentry *t_options; | 
 | 4407 |  | 
 | 4408 | 	if (t_options) | 
 | 4409 | 		return t_options; | 
 | 4410 |  | 
 | 4411 | 	d_tracer = tracing_init_dentry(); | 
 | 4412 | 	if (!d_tracer) | 
 | 4413 | 		return NULL; | 
 | 4414 |  | 
 | 4415 | 	t_options = debugfs_create_dir("options", d_tracer); | 
 | 4416 | 	if (!t_options) { | 
 | 4417 | 		pr_warning("Could not create debugfs directory 'options'\n"); | 
 | 4418 | 		return NULL; | 
 | 4419 | 	} | 
 | 4420 |  | 
 | 4421 | 	return t_options; | 
 | 4422 | } | 
 | 4423 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4424 | static void | 
 | 4425 | create_trace_option_file(struct trace_option_dentry *topt, | 
 | 4426 | 			 struct tracer_flags *flags, | 
 | 4427 | 			 struct tracer_opt *opt) | 
 | 4428 | { | 
 | 4429 | 	struct dentry *t_options; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4430 |  | 
 | 4431 | 	t_options = trace_options_init_dentry(); | 
 | 4432 | 	if (!t_options) | 
 | 4433 | 		return; | 
 | 4434 |  | 
 | 4435 | 	topt->flags = flags; | 
 | 4436 | 	topt->opt = opt; | 
 | 4437 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4438 | 	topt->entry = trace_create_file(opt->name, 0644, t_options, topt, | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4439 | 				    &trace_options_fops); | 
 | 4440 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4441 | } | 
 | 4442 |  | 
 | 4443 | static struct trace_option_dentry * | 
 | 4444 | create_trace_option_files(struct tracer *tracer) | 
 | 4445 | { | 
 | 4446 | 	struct trace_option_dentry *topts; | 
 | 4447 | 	struct tracer_flags *flags; | 
 | 4448 | 	struct tracer_opt *opts; | 
 | 4449 | 	int cnt; | 
 | 4450 |  | 
 | 4451 | 	if (!tracer) | 
 | 4452 | 		return NULL; | 
 | 4453 |  | 
 | 4454 | 	flags = tracer->flags; | 
 | 4455 |  | 
 | 4456 | 	if (!flags || !flags->opts) | 
 | 4457 | 		return NULL; | 
 | 4458 |  | 
 | 4459 | 	opts = flags->opts; | 
 | 4460 |  | 
 | 4461 | 	for (cnt = 0; opts[cnt].name; cnt++) | 
 | 4462 | 		; | 
 | 4463 |  | 
| Steven Rostedt | 0cfe824 | 2009-02-27 10:51:10 -0500 | [diff] [blame] | 4464 | 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 4465 | 	if (!topts) | 
 | 4466 | 		return NULL; | 
 | 4467 |  | 
 | 4468 | 	for (cnt = 0; opts[cnt].name; cnt++) | 
 | 4469 | 		create_trace_option_file(&topts[cnt], flags, | 
 | 4470 | 					 &opts[cnt]); | 
 | 4471 |  | 
 | 4472 | 	return topts; | 
 | 4473 | } | 
 | 4474 |  | 
 | 4475 | static void | 
 | 4476 | destroy_trace_option_files(struct trace_option_dentry *topts) | 
 | 4477 | { | 
 | 4478 | 	int cnt; | 
 | 4479 |  | 
 | 4480 | 	if (!topts) | 
 | 4481 | 		return; | 
 | 4482 |  | 
 | 4483 | 	for (cnt = 0; topts[cnt].opt; cnt++) { | 
 | 4484 | 		if (topts[cnt].entry) | 
 | 4485 | 			debugfs_remove(topts[cnt].entry); | 
 | 4486 | 	} | 
 | 4487 |  | 
 | 4488 | 	kfree(topts); | 
 | 4489 | } | 
 | 4490 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4491 | static struct dentry * | 
 | 4492 | create_trace_option_core_file(const char *option, long index) | 
 | 4493 | { | 
 | 4494 | 	struct dentry *t_options; | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4495 |  | 
 | 4496 | 	t_options = trace_options_init_dentry(); | 
 | 4497 | 	if (!t_options) | 
 | 4498 | 		return NULL; | 
 | 4499 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4500 | 	return trace_create_file(option, 0644, t_options, (void *)index, | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4501 | 				    &trace_options_core_fops); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4502 | } | 
 | 4503 |  | 
 | 4504 | static __init void create_trace_options_dir(void) | 
 | 4505 | { | 
 | 4506 | 	struct dentry *t_options; | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4507 | 	int i; | 
 | 4508 |  | 
 | 4509 | 	t_options = trace_options_init_dentry(); | 
 | 4510 | 	if (!t_options) | 
 | 4511 | 		return; | 
 | 4512 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4513 | 	for (i = 0; trace_options[i]; i++) | 
 | 4514 | 		create_trace_option_core_file(trace_options[i], i); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4515 | } | 
 | 4516 |  | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 4517 | static __init int tracer_init_debugfs(void) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4518 | { | 
 | 4519 | 	struct dentry *d_tracer; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4520 | 	int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4521 |  | 
| Lai Jiangshan | 7e53bd4 | 2010-01-06 20:08:50 +0800 | [diff] [blame] | 4522 | 	trace_access_lock_init(); | 
 | 4523 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4524 | 	d_tracer = tracing_init_dentry(); | 
 | 4525 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4526 | 	trace_create_file("tracing_enabled", 0644, d_tracer, | 
 | 4527 | 			&global_trace, &tracing_ctrl_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4528 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4529 | 	trace_create_file("trace_options", 0644, d_tracer, | 
 | 4530 | 			NULL, &tracing_iter_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4531 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4532 | 	trace_create_file("tracing_cpumask", 0644, d_tracer, | 
 | 4533 | 			NULL, &tracing_cpumask_fops); | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 4534 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4535 | 	trace_create_file("trace", 0644, d_tracer, | 
 | 4536 | 			(void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 4537 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4538 | 	trace_create_file("available_tracers", 0444, d_tracer, | 
 | 4539 | 			&global_trace, &show_traces_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4540 |  | 
| Li Zefan | 339ae5d | 2009-04-17 10:34:30 +0800 | [diff] [blame] | 4541 | 	trace_create_file("current_tracer", 0644, d_tracer, | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4542 | 			&global_trace, &set_tracer_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4543 |  | 
| Steven Rostedt | 5d4a9db | 2009-08-27 16:52:21 -0400 | [diff] [blame] | 4544 | #ifdef CONFIG_TRACER_MAX_TRACE | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4545 | 	trace_create_file("tracing_max_latency", 0644, d_tracer, | 
 | 4546 | 			&tracing_max_latency, &tracing_max_lat_fops); | 
| Tim Bird | 0e95017 | 2010-02-25 15:36:43 -0800 | [diff] [blame] | 4547 | #endif | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4548 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4549 | 	trace_create_file("tracing_thresh", 0644, d_tracer, | 
 | 4550 | 			&tracing_thresh, &tracing_max_lat_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4551 |  | 
| Li Zefan | 339ae5d | 2009-04-17 10:34:30 +0800 | [diff] [blame] | 4552 | 	trace_create_file("README", 0444, d_tracer, | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4553 | 			NULL, &tracing_readme_fops); | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 4554 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4555 | 	trace_create_file("trace_pipe", 0444, d_tracer, | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4556 | 			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4557 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4558 | 	trace_create_file("buffer_size_kb", 0644, d_tracer, | 
 | 4559 | 			&global_trace, &tracing_entries_fops); | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 4560 |  | 
| Vaibhav Nagarnaik | f81ab07 | 2011-08-16 14:46:15 -0700 | [diff] [blame] | 4561 | 	trace_create_file("buffer_total_size_kb", 0444, d_tracer, | 
 | 4562 | 			&global_trace, &tracing_total_entries_fops); | 
 | 4563 |  | 
| Vaibhav Nagarnaik | 4f271a2 | 2011-06-13 17:51:57 -0700 | [diff] [blame] | 4564 | 	trace_create_file("free_buffer", 0644, d_tracer, | 
 | 4565 | 			&global_trace, &tracing_free_buffer_fops); | 
 | 4566 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4567 | 	trace_create_file("trace_marker", 0220, d_tracer, | 
 | 4568 | 			NULL, &tracing_mark_fops); | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 4569 |  | 
| Avadh Patel | 69abe6a | 2009-04-10 16:04:48 -0400 | [diff] [blame] | 4570 | 	trace_create_file("saved_cmdlines", 0444, d_tracer, | 
 | 4571 | 			NULL, &tracing_saved_cmdlines_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4572 |  | 
| Zhaolei | 5079f32 | 2009-08-25 16:12:56 +0800 | [diff] [blame] | 4573 | 	trace_create_file("trace_clock", 0644, d_tracer, NULL, | 
 | 4574 | 			  &trace_clock_fops); | 
 | 4575 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4576 | #ifdef CONFIG_DYNAMIC_FTRACE | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4577 | 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 
 | 4578 | 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4579 | #endif | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4580 |  | 
| Frederic Weisbecker | 5452af6 | 2009-03-27 00:25:38 +0100 | [diff] [blame] | 4581 | 	create_trace_options_dir(); | 
 | 4582 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4583 | 	for_each_tracing_cpu(cpu) | 
 | 4584 | 		tracing_init_debugfs_percpu(cpu); | 
 | 4585 |  | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 4586 | 	return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4587 | } | 
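/*
 * Sketch of the resulting debugfs layout (derived from the calls above;
 * the conditional entries depend on the kernel configuration):
 *
 *	tracing/
 *		tracing_enabled  trace_options  tracing_cpumask  trace
 *		available_tracers  current_tracer  tracing_thresh  README
 *		trace_pipe  buffer_size_kb  buffer_total_size_kb  free_buffer
 *		trace_marker  saved_cmdlines  trace_clock
 *		tracing_max_latency  dyn_ftrace_total_info   (config dependent)
 *		options/		(one file per trace option)
 *		per_cpu/cpuN/		(trace, trace_pipe, trace_pipe_raw, stats)
 */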
 | 4588 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4589 | static int trace_panic_handler(struct notifier_block *this, | 
 | 4590 | 			       unsigned long event, void *unused) | 
 | 4591 | { | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 4592 | 	if (ftrace_dump_on_oops) | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 4593 | 		ftrace_dump(ftrace_dump_on_oops); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4594 | 	return NOTIFY_OK; | 
 | 4595 | } | 
 | 4596 |  | 
 | 4597 | static struct notifier_block trace_panic_notifier = { | 
 | 4598 | 	.notifier_call  = trace_panic_handler, | 
 | 4599 | 	.next           = NULL, | 
 | 4600 | 	.priority       = 150   /* priority: INT_MAX >= x >= 0 */ | 
 | 4601 | }; | 
 | 4602 |  | 
 | 4603 | static int trace_die_handler(struct notifier_block *self, | 
 | 4604 | 			     unsigned long val, | 
 | 4605 | 			     void *data) | 
 | 4606 | { | 
 | 4607 | 	switch (val) { | 
 | 4608 | 	case DIE_OOPS: | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 4609 | 		if (ftrace_dump_on_oops) | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 4610 | 			ftrace_dump(ftrace_dump_on_oops); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4611 | 		break; | 
 | 4612 | 	default: | 
 | 4613 | 		break; | 
 | 4614 | 	} | 
 | 4615 | 	return NOTIFY_OK; | 
 | 4616 | } | 
 | 4617 |  | 
 | 4618 | static struct notifier_block trace_die_notifier = { | 
 | 4619 | 	.notifier_call = trace_die_handler, | 
 | 4620 | 	.priority = 200 | 
 | 4621 | }; | 
 | 4622 |  | 
 | 4623 | /* | 
 | 4624 |  * printk is limited to a maximum of 1024 bytes; we really don't need | 
 | 4625 |  * that much here, as nothing should be printing 1000 characters anyway. | 
 | 4626 |  */ | 
 | 4627 | #define TRACE_MAX_PRINT		1000 | 
 | 4628 |  | 
 | 4629 | /* | 
 | 4630 |  * Define KERN_TRACE here so that we have one place to modify | 
 | 4631 |  * it if we decide to change what log level the ftrace dump | 
 | 4632 |  * should be at. | 
 | 4633 |  */ | 
| Steven Rostedt | 428aee1 | 2009-01-14 12:24:42 -0500 | [diff] [blame] | 4634 | #define KERN_TRACE		KERN_EMERG | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4635 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 4636 | void | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4637 | trace_printk_seq(struct trace_seq *s) | 
 | 4638 | { | 
 | 4639 | 	/* Probably should print a warning here. */ | 
 | 4640 | 	if (s->len >= TRACE_MAX_PRINT) | 
 | 4641 | 		s->len = TRACE_MAX_PRINT; | 
 | 4642 |  | 
 | 4643 | 	/* should be NUL terminated, but we are paranoid. */ | 
 | 4644 | 	s->buffer[s->len] = 0; | 
 | 4645 |  | 
 | 4646 | 	printk(KERN_TRACE "%s", s->buffer); | 
 | 4647 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 4648 | 	trace_seq_init(s); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4649 | } | 
 | 4650 |  | 
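/*
 * Initialize an iterator over the global trace buffer: current tracer,
 * all CPUs. Callers such as the dump code below may then narrow
 * cpu_file down to a single CPU.
 */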
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 4651 | void trace_init_global_iter(struct trace_iterator *iter) | 
 | 4652 | { | 
 | 4653 | 	iter->tr = &global_trace; | 
 | 4654 | 	iter->trace = current_trace; | 
 | 4655 | 	iter->cpu_file = TRACE_PIPE_ALL_CPU; | 
 | 4656 | } | 
 | 4657 |  | 
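/*
 * Dump the ring buffer contents to the console. Only the first caller
 * ever performs the dump (guarded by dump_ran); tracing is turned off,
 * the per-cpu buffers are marked disabled, and every readable entry is
 * printed via trace_printk_seq(). When disable_tracing is false,
 * tracing is re-enabled once the dump completes.
 */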
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 4658 | static void | 
 | 4659 | __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4660 | { | 
| Thomas Gleixner | 445c895 | 2009-12-02 19:49:50 +0100 | [diff] [blame] | 4661 | 	static arch_spinlock_t ftrace_dump_lock = | 
| Thomas Gleixner | edc35bd | 2009-12-03 12:38:57 +0100 | [diff] [blame] | 4662 | 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4663 | 	/* use static because iter can be a bit big for the stack */ | 
 | 4664 | 	static struct trace_iterator iter; | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4665 | 	unsigned int old_userobj; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4666 | 	static int dump_ran; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 4667 | 	unsigned long flags; | 
 | 4668 | 	int cnt = 0, cpu; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4669 |  | 
 | 4670 | 	/* only one dump */ | 
| Steven Rostedt | cd891ae | 2009-04-28 11:39:34 -0400 | [diff] [blame] | 4671 | 	local_irq_save(flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 4672 | 	arch_spin_lock(&ftrace_dump_lock); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4673 | 	if (dump_ran) | 
 | 4674 | 		goto out; | 
 | 4675 |  | 
 | 4676 | 	dump_ran = 1; | 
 | 4677 |  | 
| Steven Rostedt | 0ee6b6c | 2009-01-14 14:50:19 -0500 | [diff] [blame] | 4678 | 	tracing_off(); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4679 |  | 
| Steven Rostedt | e0a413f | 2011-09-29 21:26:16 -0400 | [diff] [blame] | 4680 | 	/* Did function tracer already get disabled? */ | 
 | 4681 | 	if (ftrace_is_dead()) { | 
 | 4682 | 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); | 
 | 4683 | 		printk("#          MAY BE MISSING FUNCTION EVENTS\n"); | 
 | 4684 | 	} | 
 | 4685 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4686 | 	if (disable_tracing) | 
 | 4687 | 		ftrace_kill(); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4688 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 4689 | 	trace_init_global_iter(&iter); | 
 | 4690 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 4691 | 	for_each_tracing_cpu(cpu) { | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 4692 | 		atomic_inc(&iter.tr->data[cpu]->disabled); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 4693 | 	} | 
 | 4694 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4695 | 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; | 
 | 4696 |  | 
| Török Edwin | b54d3de | 2008-11-22 13:28:48 +0200 | [diff] [blame] | 4697 | 	/* don't look at user memory in panic mode */ | 
 | 4698 | 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 
 | 4699 |  | 
| Steven Rostedt | e543ad7 | 2009-03-04 18:20:36 -0500 | [diff] [blame] | 4700 | 	/* Simulate the iterator */ | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4701 | 	iter.tr = &global_trace; | 
 | 4702 | 	iter.trace = current_trace; | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 4703 |  | 
 | 4704 | 	switch (oops_dump_mode) { | 
 | 4705 | 	case DUMP_ALL: | 
 | 4706 | 		iter.cpu_file = TRACE_PIPE_ALL_CPU; | 
 | 4707 | 		break; | 
 | 4708 | 	case DUMP_ORIG: | 
 | 4709 | 		iter.cpu_file = raw_smp_processor_id(); | 
 | 4710 | 		break; | 
 | 4711 | 	case DUMP_NONE: | 
 | 4712 | 		goto out_enable; | 
 | 4713 | 	default: | 
 | 4714 | 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); | 
 | 4715 | 		iter.cpu_file = TRACE_PIPE_ALL_CPU; | 
 | 4716 | 	} | 
 | 4717 |  | 
 | 4718 | 	printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4719 |  | 
 | 4720 | 	/* | 
 | 4721 | 	 * We need to stop all tracing on all CPUs to read the | 
 | 4722 | 	 * next buffer. This is a bit expensive, but it is | 
 | 4723 | 	 * not done often. We print everything we can read, | 
 | 4724 | 	 * and then release the locks again. | 
 | 4725 | 	 */ | 
 | 4726 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4727 | 	while (!trace_empty(&iter)) { | 
 | 4728 |  | 
 | 4729 | 		if (!cnt) | 
 | 4730 | 			printk(KERN_TRACE "---------------------------------\n"); | 
 | 4731 |  | 
 | 4732 | 		cnt++; | 
 | 4733 |  | 
 | 4734 | 		/* reset all but tr, trace, and overruns */ | 
 | 4735 | 		memset(&iter.seq, 0, | 
 | 4736 | 		       sizeof(struct trace_iterator) - | 
 | 4737 | 		       offsetof(struct trace_iterator, seq)); | 
 | 4738 | 		iter.iter_flags |= TRACE_FILE_LAT_FMT; | 
 | 4739 | 		iter.pos = -1; | 
 | 4740 |  | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 4741 | 		if (trace_find_next_entry_inc(&iter) != NULL) { | 
| Lai Jiangshan | 74e7ff8 | 2009-07-28 20:17:22 +0800 | [diff] [blame] | 4742 | 			int ret; | 
 | 4743 |  | 
 | 4744 | 			ret = print_trace_line(&iter); | 
 | 4745 | 			if (ret != TRACE_TYPE_NO_CONSUME) | 
 | 4746 | 				trace_consume(&iter); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4747 | 		} | 
 | 4748 |  | 
 | 4749 | 		trace_printk_seq(&iter.seq); | 
 | 4750 | 	} | 
 | 4751 |  | 
 | 4752 | 	if (!cnt) | 
 | 4753 | 		printk(KERN_TRACE "   (ftrace buffer empty)\n"); | 
 | 4754 | 	else | 
 | 4755 | 		printk(KERN_TRACE "---------------------------------\n"); | 
 | 4756 |  | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 4757 |  out_enable: | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4758 | 	/* Re-enable tracing if requested */ | 
 | 4759 | 	if (!disable_tracing) { | 
 | 4760 | 		trace_flags |= old_userobj; | 
 | 4761 |  | 
 | 4762 | 		for_each_tracing_cpu(cpu) { | 
| Jason Wessel | 955b61e | 2010-08-05 09:22:23 -0500 | [diff] [blame] | 4763 | 			atomic_dec(&iter.tr->data[cpu]->disabled); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4764 | 		} | 
 | 4765 | 		tracing_on(); | 
 | 4766 | 	} | 
 | 4767 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4768 |  out: | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 4769 | 	arch_spin_unlock(&ftrace_dump_lock); | 
| Steven Rostedt | cd891ae | 2009-04-28 11:39:34 -0400 | [diff] [blame] | 4770 | 	local_irq_restore(flags); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4771 | } | 
 | 4772 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4773 | /* By default: disable tracing after the dump */ | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 4774 | void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4775 | { | 
| Frederic Weisbecker | cecbca9 | 2010-04-18 19:08:41 +0200 | [diff] [blame] | 4776 | 	__ftrace_dump(true, oops_dump_mode); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4777 | } | 
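
/*
 * Usage sketch (illustrative only, not part of this file): debugging
 * code that detects a fatal condition can dump the buffers itself, e.g.
 *
 *	if (unlikely(hw_wedged))
 *		ftrace_dump(DUMP_ALL);
 *
 * where "hw_wedged" is a hypothetical condition. DUMP_ALL dumps every
 * CPU's buffer, DUMP_ORIG only the buffer of the CPU doing the dump,
 * and tracing stays disabled afterwards.
 */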
 | 4778 |  | 
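/*
 * Early-initcall setup: allocate the tracing cpumasks, the global ring
 * buffer (and, under CONFIG_TRACER_MAX_TRACE, the max-latency buffer),
 * wire up the per-cpu trace data, install the nop tracer as the current
 * tracer, and register the panic/die notifiers so the buffers can be
 * dumped on a crash.
 */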
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4779 | __init static int tracer_alloc_buffers(void) | 
 | 4780 | { | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 4781 | 	int ring_buf_size; | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 4782 | 	enum ring_buffer_flags rb_flags; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4783 | 	int i; | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4784 | 	int ret = -ENOMEM; | 
 | 4785 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4787 | 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) | 
 | 4788 | 		goto out; | 
 | 4789 |  | 
 | 4790 | 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 
 | 4791 | 		goto out_free_buffer_mask; | 
 | 4792 |  | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 4793 | 	/* To save memory, keep the ring buffer size at its minimum */ | 
 | 4794 | 	if (ring_buffer_expanded) | 
 | 4795 | 		ring_buf_size = trace_buf_size; | 
 | 4796 | 	else | 
 | 4797 | 		ring_buf_size = 1; | 
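	/*
	 * Note: ring_buffer_expanded is only set once a larger buffer has
	 * been requested (for example with the trace_buf_size= boot
	 * option), so the initial allocation stays at the minimum size.
	 */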
 | 4798 |  | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 4799 | 	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; | 
 | 4800 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4801 | 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 
 | 4802 | 	cpumask_copy(tracing_cpumask, cpu_all_mask); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4803 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 4804 | 	/* TODO: make the number of buffers hot pluggable with CPUs */ | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 4805 | 	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4806 | 	if (!global_trace.buffer) { | 
 | 4807 | 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 
 | 4808 | 		WARN_ON(1); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4809 | 		goto out_free_cpumask; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4810 | 	} | 
 | 4811 | 	global_trace.entries = ring_buffer_size(global_trace.buffer); | 
 | 4812 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4814 | #ifdef CONFIG_TRACER_MAX_TRACE | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 4815 | 	max_tr.buffer = ring_buffer_alloc(1, rb_flags); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4816 | 	if (!max_tr.buffer) { | 
 | 4817 | 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 
 | 4818 | 		WARN_ON(1); | 
 | 4819 | 		ring_buffer_free(global_trace.buffer); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4820 | 		goto out_free_cpumask; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4821 | 	} | 
| KOSAKI Motohiro | ef710e1 | 2010-07-01 14:34:35 +0900 | [diff] [blame] | 4822 | 	max_tr.entries = 1; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4823 | #endif | 
 | 4824 |  | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 4825 | 	/* Allocate the first page for all buffers */ | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 4826 | 	for_each_tracing_cpu(i) { | 
| jolsa@redhat.com | 566b0aa | 2009-07-16 21:44:26 +0200 | [diff] [blame] | 4827 | 		global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 
| Tejun Heo | 9705f69 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 4828 | 		max_tr.data[i] = &per_cpu(max_tr_data, i); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4829 | 	} | 
 | 4830 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4831 | 	trace_init_cmdlines(); | 
 | 4832 |  | 
| Frédéric Weisbecker | 43a1538 | 2008-09-21 20:16:30 +0200 | [diff] [blame] | 4833 | 	register_tracer(&nop_trace); | 
| Steven Rostedt | 79fb076 | 2009-02-02 21:38:33 -0500 | [diff] [blame] | 4834 | 	current_trace = &nop_trace; | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 4835 | 	/* All seems OK, enable tracing */ | 
 | 4836 | 	tracing_disabled = 0; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4837 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4838 | 	atomic_notifier_chain_register(&panic_notifier_list, | 
 | 4839 | 				       &trace_panic_notifier); | 
 | 4840 |  | 
 | 4841 | 	register_die_notifier(&trace_die_notifier); | 
| Frederic Weisbecker | 2fc1dfb | 2009-03-16 01:45:03 +0100 | [diff] [blame] | 4842 |  | 
 | 4843 | 	return 0; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4844 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4845 | out_free_cpumask: | 
 | 4846 | 	free_cpumask_var(tracing_cpumask); | 
 | 4847 | out_free_buffer_mask: | 
 | 4848 | 	free_cpumask_var(tracing_buffer_mask); | 
 | 4849 | out: | 
 | 4850 | 	return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4851 | } | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 4852 |  | 
 | 4853 | __init static int clear_boot_tracer(void) | 
 | 4854 | { | 
 | 4855 | 	/* | 
 | 4856 | 	 * The default bootup tracer name lives in an init-section | 
 | 4857 | 	 * buffer. This function is called as a late initcall: if the | 
 | 4858 | 	 * boot tracer was never found and registered, clear the | 
 | 4859 | 	 * pointer so that later registrations do not access the | 
 | 4860 | 	 * buffer that is about to be freed. | 
 | 4861 | 	 */ | 
 | 4862 | 	if (!default_bootup_tracer) | 
 | 4863 | 		return 0; | 
 | 4864 |  | 
 | 4865 | 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | 
 | 4866 | 	       default_bootup_tracer); | 
 | 4867 | 	default_bootup_tracer = NULL; | 
 | 4868 |  | 
 | 4869 | 	return 0; | 
 | 4870 | } | 
 | 4871 |  | 
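/*
 * Ordering: the buffers must exist before any tracer can register
 * (early initcall), the debugfs files need the debugfs core
 * (fs_initcall), and clear_boot_tracer() runs last so a command-line
 * boot tracer has had every chance to register before its init-section
 * name is discarded.
 */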
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 4872 | early_initcall(tracer_alloc_buffers); | 
 | 4873 | fs_initcall(tracer_init_debugfs); | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 4874 | late_initcall(clear_boot_tracer); |