| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1 | /* | 
 | 2 |  * ring buffer based function tracer | 
 | 3 |  * | 
 | 4 |  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | 
 | 5 |  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | 
 | 6 |  * | 
 | 7 |  * Originally taken from the RT patch by: | 
 | 8 |  *    Arnaldo Carvalho de Melo <acme@redhat.com> | 
 | 9 |  * | 
 | 10 |  * Based on code from the latency_tracer, that is: | 
 | 11 |  *  Copyright (C) 2004-2006 Ingo Molnar | 
 | 12 |  *  Copyright (C) 2004 William Lee Irwin III | 
 | 13 |  */ | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 14 | #include <linux/ring_buffer.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 15 | #include <linux/utsrelease.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 16 | #include <linux/stacktrace.h> | 
 | 17 | #include <linux/writeback.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 18 | #include <linux/kallsyms.h> | 
 | 19 | #include <linux/seq_file.h> | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 20 | #include <linux/notifier.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 21 | #include <linux/irqflags.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 22 | #include <linux/debugfs.h> | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 23 | #include <linux/pagemap.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 24 | #include <linux/hardirq.h> | 
 | 25 | #include <linux/linkage.h> | 
 | 26 | #include <linux/uaccess.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 27 | #include <linux/kprobes.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 28 | #include <linux/ftrace.h> | 
 | 29 | #include <linux/module.h> | 
 | 30 | #include <linux/percpu.h> | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 31 | #include <linux/splice.h> | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 32 | #include <linux/kdebug.h> | 
| Frederic Weisbecker | 5f0c6c0 | 2009-03-27 14:22:10 +0100 | [diff] [blame] | 33 | #include <linux/string.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 34 | #include <linux/ctype.h> | 
 | 35 | #include <linux/init.h> | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 36 | #include <linux/poll.h> | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 37 | #include <linux/gfp.h> | 
 | 38 | #include <linux/fs.h> | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 39 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 40 | #include "trace.h" | 
| Steven Rostedt | f0868d1 | 2008-12-23 23:24:12 -0500 | [diff] [blame] | 41 | #include "trace_output.h" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 42 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 43 | #define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE) | 
 | 44 |  | 
| Steven Rostedt | 745b162 | 2009-01-15 23:40:11 -0500 | [diff] [blame] | 45 | unsigned long __read_mostly	tracing_max_latency; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 46 | unsigned long __read_mostly	tracing_thresh; | 
 | 47 |  | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 48 | /* | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 49 |  * On boot up, the ring buffer is set to the minimum size, so that | 
 | 50 |  * we do not waste memory on systems that are not using tracing. | 
 | 51 |  */ | 
 | 52 | static int ring_buffer_expanded; | 
 | 53 |  | 
 | 54 | /* | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 55 |  * We need to change this state when a selftest is running. | 
| Frederic Weisbecker | ff32504 | 2008-12-04 23:47:35 +0100 | [diff] [blame] | 56 |  * A selftest will lurk into the ring-buffer to count the | 
 | 57 |  * entries inserted during the selftest although some concurrent | 
| Ingo Molnar | 5e1607a | 2009-03-05 10:24:48 +0100 | [diff] [blame] | 58 |  * insertions into the ring-buffer such as trace_printk could occurred | 
| Frederic Weisbecker | ff32504 | 2008-12-04 23:47:35 +0100 | [diff] [blame] | 59 |  * at the same time, giving false positive or negative results. | 
 | 60 |  */ | 
| Frederic Weisbecker | 8e1b82e | 2008-12-06 03:41:33 +0100 | [diff] [blame] | 61 | static bool __read_mostly tracing_selftest_running; | 
| Frederic Weisbecker | ff32504 | 2008-12-04 23:47:35 +0100 | [diff] [blame] | 62 |  | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 63 | /* | 
 | 64 |  * If a tracer is running, we do not want to run SELFTEST. | 
 | 65 |  */ | 
 | 66 | static bool __read_mostly tracing_selftest_disabled; | 
 | 67 |  | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 68 | /* For tracers that don't implement custom flags */ | 
 | 69 | static struct tracer_opt dummy_tracer_opt[] = { | 
 | 70 | 	{ } | 
 | 71 | }; | 
 | 72 |  | 
 | 73 | static struct tracer_flags dummy_tracer_flags = { | 
 | 74 | 	.val = 0, | 
 | 75 | 	.opts = dummy_tracer_opt | 
 | 76 | }; | 
 | 77 |  | 
/*
 * Fallback set_flag callback for tracers that do not implement
 * custom flag handling; accepts any flag change as a no-op.
 */
static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 82 |  | 
 | 83 | /* | 
 | 84 |  * Kill all tracing for good (never come back). | 
 | 85 |  * It is initialized to 1 but will turn to zero if the initialization | 
 | 86 |  * of the tracer is successful. But that is the only place that sets | 
 | 87 |  * this back to zero. | 
 | 88 |  */ | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 89 | static int tracing_disabled = 1; | 
| Steven Rostedt | 0f04870 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 90 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 91 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 
 | 92 |  | 
/*
 * Disable ring-buffer writes from the current CPU by bumping the
 * per-cpu ftrace_cpu_disabled counter.  Preemption is disabled
 * first so we stay on this CPU until ftrace_enable_cpu() pairs
 * with this call.
 */
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}
 | 98 |  | 
/*
 * Re-enable ring-buffer writes from the current CPU.
 * Must pair with a preceding ftrace_disable_cpu().
 */
static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}
 | 104 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 105 | static cpumask_var_t __read_mostly	tracing_buffer_mask; | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 106 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 107 | /* Define which cpu buffers are currently read in trace_pipe */ | 
 | 108 | static cpumask_var_t			tracing_reader_cpumask; | 
 | 109 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 110 | #define for_each_tracing_cpu(cpu)	\ | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 111 | 	for_each_cpu(cpu, tracing_buffer_mask) | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 112 |  | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 113 | /* | 
 | 114 |  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | 
 | 115 |  * | 
 | 116 |  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops | 
 | 117 |  * is set, then ftrace_dump is called. This will output the contents | 
 | 118 |  * of the ftrace buffers to the console.  This is very useful for | 
 | 119 |  * capturing traces that lead to crashes and outputing it to a | 
 | 120 |  * serial console. | 
 | 121 |  * | 
 | 122 |  * It is default off, but you can enable it with either specifying | 
 | 123 |  * "ftrace_dump_on_oops" in the kernel command line, or setting | 
 | 124 |  * /proc/sys/kernel/ftrace_dump_on_oops to true. | 
 | 125 |  */ | 
 | 126 | int ftrace_dump_on_oops; | 
 | 127 |  | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 128 | static int tracing_set_tracer(const char *buf); | 
 | 129 |  | 
 | 130 | #define BOOTUP_TRACER_SIZE		100 | 
 | 131 | static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata; | 
 | 132 | static char *default_bootup_tracer; | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 133 |  | 
 | 134 | static int __init set_ftrace(char *str) | 
 | 135 | { | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 136 | 	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE); | 
 | 137 | 	default_bootup_tracer = bootup_tracer_buf; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 138 | 	/* We are using ftrace early, expand it */ | 
 | 139 | 	ring_buffer_expanded = 1; | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 140 | 	return 1; | 
 | 141 | } | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 142 | __setup("ftrace=", set_ftrace); | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 143 |  | 
/*
 * Handle the "ftrace_dump_on_oops" boot parameter: arrange for the
 * ftrace buffers to be dumped to the console on an oops or panic.
 * The parameter takes no value; its presence alone enables the dump.
 */
static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
 | 149 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 150 |  | 
/*
 * Convert nanoseconds to microseconds, rounding to the nearest
 * microsecond.  do_div() divides @nsec in place and is required
 * here because a plain 64-bit division is not available on all
 * 32-bit architectures.
 */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;	/* round to nearest, not truncate */
	do_div(nsec, 1000);
	return nsec;
}
 | 157 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 158 | /* | 
 | 159 |  * The global_trace is the descriptor that holds the tracing | 
 | 160 |  * buffers for the live tracing. For each CPU, it contains | 
 | 161 |  * a link list of pages that will store trace entries. The | 
 | 162 |  * page descriptor of the pages in the memory is used to hold | 
 | 163 |  * the link list by linking the lru item in the page descriptor | 
 | 164 |  * to each of the pages in the buffer per CPU. | 
 | 165 |  * | 
 | 166 |  * For each active CPU there is a data field that holds the | 
 | 167 |  * pages for the buffer for that CPU. Each CPU has the same number | 
 | 168 |  * of pages allocated for its buffer. | 
 | 169 |  */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 170 | static struct trace_array	global_trace; | 
 | 171 |  | 
 | 172 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); | 
 | 173 |  | 
/*
 * ftrace_now - return the current trace clock value for @cpu
 *
 * Reads the time stamp from the global trace ring buffer and
 * normalizes it.  Early in boot, before the buffer is allocated,
 * fall back to the raw local trace clock.
 */
cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}
 | 187 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 188 | /* | 
 | 189 |  * The max_tr is used to snapshot the global_trace when a maximum | 
 | 190 |  * latency is reached. Some tracers will use this to store a maximum | 
 | 191 |  * trace while it continues examining live traces. | 
 | 192 |  * | 
 | 193 |  * The buffers for the max_tr are set up the same as the global_trace. | 
 | 194 |  * When a snapshot is taken, the link list of the max_tr is swapped | 
 | 195 |  * with the link list of the global_trace and the buffers are reset for | 
 | 196 |  * the global_trace so the tracing can continue. | 
 | 197 |  */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 198 | static struct trace_array	max_tr; | 
 | 199 |  | 
 | 200 | static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | 
 | 201 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 202 | /* tracer_enabled is used to toggle activation of a tracer */ | 
| Steven Rostedt | 26994ea | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 203 | static int			tracer_enabled = 1; | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 204 |  | 
/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know whether they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}
 | 217 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 218 | /* | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 219 |  * trace_buf_size is the size in bytes that is allocated | 
 | 220 |  * for a buffer. Note, the number of bytes is always rounded | 
 | 221 |  * to page size. | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 222 |  * | 
 | 223 |  * This number is purposely set to a low number of 16384. | 
 | 224 |  * If the dump on oops happens, it will be much appreciated | 
 | 225 |  * to not have to wait for all that output. Anyway this can be | 
 | 226 |  * boot time and run time configurable. | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 227 |  */ | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 228 | #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */ | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 229 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 230 | static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 231 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 232 | /* trace_types holds a link list of available tracers. */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 233 | static struct tracer		*trace_types __read_mostly; | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 234 |  | 
 | 235 | /* current_trace points to the tracer that is currently active */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 236 | static struct tracer		*current_trace __read_mostly; | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 237 |  | 
 | 238 | /* | 
 | 239 |  * max_tracer_type_len is used to simplify the allocating of | 
 | 240 |  * buffers to read userspace tracer names. We keep track of | 
 | 241 |  * the longest tracer name registered. | 
 | 242 |  */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 243 | static int			max_tracer_type_len; | 
 | 244 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 245 | /* | 
 | 246 |  * trace_types_lock is used to protect the trace_types list. | 
 | 247 |  * This lock is also used to keep user access serialized. | 
 | 248 |  * Accesses from userspace will grab this lock while userspace | 
 | 249 |  * activities happen inside the kernel. | 
 | 250 |  */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 251 | static DEFINE_MUTEX(trace_types_lock); | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 252 |  | 
 | 253 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 
| Ingo Molnar | 4e65551 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 254 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 
 | 255 |  | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 256 | /* trace_flags holds trace_options default values */ | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 257 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 
| Steven Rostedt | be6f164 | 2009-03-24 11:06:24 -0400 | [diff] [blame] | 258 | 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME; | 
| Ingo Molnar | 4e65551 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 259 |  | 
/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now: calling wake_up() while the runqueue lock is
	 * held could recurse into the scheduler, so skip the wakeup
	 * in that case.
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 275 |  | 
/*
 * Handle the "trace_buf_size=" boot parameter: set the size in
 * bytes of each per-cpu trace buffer (the ring buffer rounds the
 * number of bytes to page size).
 */
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries can not be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 290 | __setup("trace_buf_size=", set_buf_size); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 291 |  | 
/*
 * Convert a nanosecond count to whole microseconds (truncating;
 * compare ns2usecs(), which rounds to nearest).
 */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	unsigned long usecs = nsecs / 1000UL;

	return usecs;
}
 | 296 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 297 | /* These must match the bit postions in trace_iterator_flags */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 298 | static const char *trace_options[] = { | 
 | 299 | 	"print-parent", | 
 | 300 | 	"sym-offset", | 
 | 301 | 	"sym-addr", | 
 | 302 | 	"verbose", | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 303 | 	"raw", | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 304 | 	"hex", | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 305 | 	"bin", | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 306 | 	"block", | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 307 | 	"stacktrace", | 
| Ingo Molnar | 4ac3ba4 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 308 | 	"sched-tree", | 
| Ingo Molnar | 5e1607a | 2009-03-05 10:24:48 +0100 | [diff] [blame] | 309 | 	"trace_printk", | 
| Steven Rostedt | b2a866f | 2008-11-03 23:15:57 -0500 | [diff] [blame] | 310 | 	"ftrace_preempt", | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 311 | 	"branch", | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 312 | 	"annotate", | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 313 | 	"userstacktrace", | 
| Török Edwin | b54d3de | 2008-11-22 13:28:48 +0200 | [diff] [blame] | 314 | 	"sym-userobj", | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 315 | 	"printk-msg-only", | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 316 | 	"context-info", | 
| Steven Rostedt | c032ef64 | 2009-03-04 20:34:24 -0500 | [diff] [blame] | 317 | 	"latency-format", | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 318 | 	"global-clock", | 
| Steven Rostedt | be6f164 | 2009-03-24 11:06:24 -0400 | [diff] [blame] | 319 | 	"sleep-time", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 320 | 	NULL | 
 | 321 | }; | 
 | 322 |  | 
| Steven Rostedt | 4fcdae8 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 323 | /* | 
 | 324 |  * ftrace_max_lock is used to protect the swapping of buffers | 
 | 325 |  * when taking a max snapshot. The buffers themselves are | 
 | 326 |  * protected by per_cpu spinlocks. But the action of the swap | 
 | 327 |  * needs its own lock. | 
 | 328 |  * | 
 | 329 |  * This is defined as a raw_spinlock_t in order to help | 
 | 330 |  * with performance when lockdep debugging is enabled. | 
 | 331 |  */ | 
| Steven Rostedt | 92205c2 | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 332 | static raw_spinlock_t ftrace_max_lock = | 
 | 333 | 	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 334 |  | 
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 *
 * Caller must hold ftrace_max_lock with interrupts disabled.
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	/* Record the state of the task that caused the new maximum */
	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = task_uid(tsk);
	/* static_prio is kernel priority; convert to userspace nice value */
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
 | 361 |  | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 362 | ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) | 
 | 363 | { | 
 | 364 | 	int len; | 
 | 365 | 	int ret; | 
 | 366 |  | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 367 | 	if (!cnt) | 
 | 368 | 		return 0; | 
 | 369 |  | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 370 | 	if (s->len <= s->readpos) | 
 | 371 | 		return -EBUSY; | 
 | 372 |  | 
 | 373 | 	len = s->len - s->readpos; | 
 | 374 | 	if (cnt > len) | 
 | 375 | 		cnt = len; | 
 | 376 | 	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 377 | 	if (ret == cnt) | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 378 | 		return -EFAULT; | 
 | 379 |  | 
| Steven Rostedt | 2dc5d12 | 2009-03-04 19:10:05 -0500 | [diff] [blame] | 380 | 	cnt -= ret; | 
 | 381 |  | 
| Steven Rostedt | e74da52 | 2009-03-04 20:31:11 -0500 | [diff] [blame] | 382 | 	s->readpos += cnt; | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 383 | 	return cnt; | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 384 | } | 
 | 385 |  | 
| Dmitri Vorobiev | b8b9426 | 2009-03-22 19:11:11 +0200 | [diff] [blame] | 386 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 387 | { | 
 | 388 | 	int len; | 
 | 389 | 	void *ret; | 
 | 390 |  | 
 | 391 | 	if (s->len <= s->readpos) | 
 | 392 | 		return -EBUSY; | 
 | 393 |  | 
 | 394 | 	len = s->len - s->readpos; | 
 | 395 | 	if (cnt > len) | 
 | 396 | 		cnt = len; | 
 | 397 | 	ret = memcpy(buf, s->buffer + s->readpos, cnt); | 
 | 398 | 	if (!ret) | 
 | 399 | 		return -EFAULT; | 
 | 400 |  | 
| Steven Rostedt | e74da52 | 2009-03-04 20:31:11 -0500 | [diff] [blame] | 401 | 	s->readpos += cnt; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 402 | 	return cnt; | 
 | 403 | } | 
 | 404 |  | 
/*
 * Flush the contents of the trace_seq @s into the seq_file @m and
 * reset @s for reuse.  The length is clamped to PAGE_SIZE - 1 so
 * that the terminating NUL written below always fits in the buffer.
 */
static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_init(s);
}
 | 415 |  | 
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Must be called with interrupts disabled; ftrace_max_lock serializes
 * the swap against concurrent snapshots.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	/* Swap the live buffer with the max snapshot buffer */
	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	/* Reset the (former max) buffer that now carries live tracing */
	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}
 | 443 |  | 
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 *
 * Must be called with interrupts disabled; ftrace_max_lock serializes
 * the swap against concurrent snapshots.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	/* Swap only @cpu's buffer between the live trace and the max */
	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	/* -EAGAIN from the swap is tolerated; anything else is a bug */
	WARN_ON_ONCE(ret && ret != -EAGAIN);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}
 | 472 |  | 
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.  Returns 0 on success, or -1 when the
 * tracer has no name, a tracer with the same name is already
 * registered, or the startup selftest fails.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	/*
	 * When this gets called we hold the BKL which means that
	 * preemption is disabled. Various trace selftests however
	 * need to disable and enable preemption for successful tests.
	 * So we drop the BKL here and grab it after the tests again.
	 */
	unlock_kernel();
	mutex_lock(&trace_types_lock);

	/* NOTE(review): presumably lets other code know a selftest may run
	 * while trace_types_lock is held — confirm against readers of this
	 * flag elsewhere in the file. */
	tracing_selftest_running = true;

	/* Tracers are looked up by name, so reject duplicates. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Install no-op defaults for the optional callbacks and flags. */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;


#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int i;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	/* Link the new tracer at the head of the global list. */
	type->next = trace_types;
	trace_types = type;
	/* Cache the longest registered tracer name (consumed by the
	 * unregister path below; other users not visible here). */
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	/* If this tracer was requested on the kernel command line, make it
	 * the current tracer now that it is registered. */
	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	lock_kernel();
	return ret;
}
 | 589 |  | 
 | 590 | void unregister_tracer(struct tracer *type) | 
 | 591 | { | 
 | 592 | 	struct tracer **t; | 
 | 593 | 	int len; | 
 | 594 |  | 
 | 595 | 	mutex_lock(&trace_types_lock); | 
 | 596 | 	for (t = &trace_types; *t; t = &(*t)->next) { | 
 | 597 | 		if (*t == type) | 
 | 598 | 			goto found; | 
 | 599 | 	} | 
 | 600 | 	pr_info("Trace %s not registered\n", type->name); | 
 | 601 | 	goto out; | 
 | 602 |  | 
 | 603 |  found: | 
 | 604 | 	*t = (*t)->next; | 
| Arnaldo Carvalho de Melo | b5db03c | 2009-02-07 18:52:59 -0200 | [diff] [blame] | 605 |  | 
 | 606 | 	if (type == current_trace && tracer_enabled) { | 
 | 607 | 		tracer_enabled = 0; | 
 | 608 | 		tracing_stop(); | 
 | 609 | 		if (current_trace->stop) | 
 | 610 | 			current_trace->stop(&global_trace); | 
 | 611 | 		current_trace = &nop_trace; | 
 | 612 | 	} | 
 | 613 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 614 | 	if (strlen(type->name) != max_tracer_type_len) | 
 | 615 | 		goto out; | 
 | 616 |  | 
 | 617 | 	max_tracer_type_len = 0; | 
 | 618 | 	for (t = &trace_types; *t; t = &(*t)->next) { | 
 | 619 | 		len = strlen((*t)->name); | 
 | 620 | 		if (len > max_tracer_type_len) | 
 | 621 | 			max_tracer_type_len = len; | 
 | 622 | 	} | 
 | 623 |  out: | 
 | 624 | 	mutex_unlock(&trace_types_lock); | 
 | 625 | } | 
 | 626 |  | 
/*
 * tracing_reset - clear one cpu's ring buffer of @tr.
 *
 * ftrace_disable_cpu()/ftrace_enable_cpu() bracket the reset so the
 * function tracer does not write into the buffer while it is being
 * reset (trace_function() bails out when ftrace_cpu_disabled is set).
 */
void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}
 | 633 |  | 
| Pekka J Enberg | 213cc06 | 2008-12-19 12:08:39 +0200 | [diff] [blame] | 634 | void tracing_reset_online_cpus(struct trace_array *tr) | 
 | 635 | { | 
 | 636 | 	int cpu; | 
 | 637 |  | 
 | 638 | 	tr->time_start = ftrace_now(tr->cpu); | 
 | 639 |  | 
 | 640 | 	for_each_online_cpu(cpu) | 
 | 641 | 		tracing_reset(tr, cpu); | 
 | 642 | } | 
 | 643 |  | 
/* Size of the pid -> comm cache below. */
#define SAVED_CMDLINES 128
/*
 * Sentinel for "no mapping". All-ones on purpose: trace_init_cmdlines()
 * fills the maps with memset(0xff), which only yields this value in
 * every element because NO_CMDLINE_MAP is all one-bits.
 */
#define NO_CMDLINE_MAP UINT_MAX
/* pid -> index into saved_cmdlines[], or NO_CMDLINE_MAP. */
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
/* Reverse map: saved_cmdlines[] slot -> owning pid, for slot recycling. */
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
/* The cached task comm strings themselves. */
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
/* Most recently used slot in saved_cmdlines[]. */
static int cmdline_idx;
/* Raw lock: taken with trylock from the trace fast path. */
static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 654 |  | 
/*
 * trace_init_cmdlines - reset the pid<->comm caches to "no mapping".
 *
 * memset() writes the byte 0xff into every element; that equals
 * NO_CMDLINE_MAP (UINT_MAX) only because the sentinel is all one-bits.
 */
static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}
 | 661 |  | 
/* Nesting depth of tracing_stop() calls; 0 means tracing is running. */
static int trace_stop_count;
/* Serializes tracing_start()/tracing_stop() and protects the count. */
static DEFINE_SPINLOCK(tracing_start_lock);
 | 664 |  | 
/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}
 | 679 |  | 
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Stop/start calls nest: only the outermost tracing_start() (the one
 * that drops trace_stop_count back to zero) re-enables recording.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			/* Clamp so a stray start cannot wedge the count. */
			trace_stop_count = 0;
		}
		goto out;
	}


	/* Re-enable recording on both the main and the max-latency buffer. */
	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
 | 717 |  | 
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 *
 * Calls nest: only the first tracing_stop() (count going 0 -> 1)
 * actually disables recording; later calls just bump the count.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	/* Disable recording on both the main and the max-latency buffer. */
	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
 | 745 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 746 | void trace_stop_cmdline_recording(void); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 747 |  | 
/*
 * trace_save_cmdline - remember @tsk's comm in the pid->comm cache.
 *
 * Best effort: silently gives up when the pid is out of range or the
 * cache lock is contended, since this runs on the tracing fast path.
 */
static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* pid 0 (idle) and pids beyond the map array are not cached. */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!__raw_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet: recycle the next slot round-robin. */
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	/* Refresh the comm even if the slot already existed. */
	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	__raw_spin_unlock(&trace_cmdline_lock);
}
 | 788 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 789 | void trace_find_cmdline(int pid, char comm[]) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 790 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 791 | 	unsigned map; | 
 | 792 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 793 | 	if (!pid) { | 
 | 794 | 		strcpy(comm, "<idle>"); | 
 | 795 | 		return; | 
 | 796 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 797 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 798 | 	if (pid > PID_MAX_DEFAULT) { | 
 | 799 | 		strcpy(comm, "<...>"); | 
 | 800 | 		return; | 
 | 801 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 802 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 803 | 	__raw_spin_lock(&trace_cmdline_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 804 | 	map = map_pid_to_cmdline[pid]; | 
| Thomas Gleixner | 50d8875 | 2009-03-18 08:58:44 +0100 | [diff] [blame] | 805 | 	if (map != NO_CMDLINE_MAP) | 
 | 806 | 		strcpy(comm, saved_cmdlines[map]); | 
 | 807 | 	else | 
 | 808 | 		strcpy(comm, "<...>"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 809 |  | 
| Steven Rostedt | 4ca5308 | 2009-03-16 19:20:15 -0400 | [diff] [blame] | 810 | 	__raw_spin_unlock(&trace_cmdline_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 811 | } | 
 | 812 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 813 | void tracing_record_cmdline(struct task_struct *tsk) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 814 | { | 
| Thomas Gleixner | 18aecd3 | 2009-03-18 08:56:58 +0100 | [diff] [blame] | 815 | 	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled || | 
 | 816 | 	    !tracing_is_on()) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 817 | 		return; | 
 | 818 |  | 
 | 819 | 	trace_save_cmdline(tsk); | 
 | 820 | } | 
 | 821 |  | 
/*
 * tracing_generic_entry_update - fill the common header of a trace entry.
 * @entry: the entry to initialize
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Records the current task's pid/tgid and encodes the irq/softirq/
 * hardirq/need-resched state into entry->flags.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	/* Only the low byte of the preempt count is recorded. */
	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->tgid			= (tsk) ? tsk->tgid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		/* Arch cannot report irq state: mark it as unsupported. */
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
 | 841 |  | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 842 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | 
 | 843 | 						    unsigned char type, | 
 | 844 | 						    unsigned long len, | 
 | 845 | 						    unsigned long flags, int pc) | 
 | 846 | { | 
 | 847 | 	struct ring_buffer_event *event; | 
 | 848 |  | 
 | 849 | 	event = ring_buffer_lock_reserve(tr->buffer, len); | 
 | 850 | 	if (event != NULL) { | 
 | 851 | 		struct trace_entry *ent = ring_buffer_event_data(event); | 
 | 852 |  | 
 | 853 | 		tracing_generic_entry_update(ent, flags, pc); | 
 | 854 | 		ent->type = type; | 
 | 855 | 	} | 
 | 856 |  | 
 | 857 | 	return event; | 
 | 858 | } | 
 | 859 | static void ftrace_trace_stack(struct trace_array *tr, | 
 | 860 | 			       unsigned long flags, int skip, int pc); | 
 | 861 | static void ftrace_trace_userstack(struct trace_array *tr, | 
 | 862 | 				   unsigned long flags, int pc); | 
 | 863 |  | 
/*
 * __trace_buffer_unlock_commit - commit a reserved event to @tr.
 * @event: event previously returned by trace_buffer_lock_reserve()
 * @wake:  non-zero to wake up waiting readers after the commit
 *
 * Also records the kernel and user stack traces when those trace
 * options are enabled.
 */
static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
					struct ring_buffer_event *event,
					unsigned long flags, int pc,
					int wake)
{
	ring_buffer_unlock_commit(tr->buffer, event);

	/* skip=6: presumably skips the commit-path frames — TODO confirm */
	ftrace_trace_stack(tr, flags, 6, pc);
	ftrace_trace_userstack(tr, flags, pc);

	if (wake)
		trace_wake_up();
}
 | 877 |  | 
/*
 * trace_buffer_unlock_commit - commit an event to @tr and wake readers.
 */
void trace_buffer_unlock_commit(struct trace_array *tr,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
}
 | 884 |  | 
/*
 * trace_current_buffer_lock_reserve - reserve an event in the global
 * trace buffer. Convenience wrapper around trace_buffer_lock_reserve().
 */
struct ring_buffer_event *
trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
				  unsigned long flags, int pc)
{
	return trace_buffer_lock_reserve(&global_trace,
					 type, len, flags, pc);
}
 | 892 |  | 
 | 893 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | 
 | 894 | 					unsigned long flags, int pc) | 
 | 895 | { | 
| Frederic Weisbecker | 07edf71 | 2009-03-22 23:10:46 +0100 | [diff] [blame] | 896 | 	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1); | 
 | 897 | } | 
 | 898 |  | 
 | 899 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, | 
 | 900 | 					unsigned long flags, int pc) | 
 | 901 | { | 
 | 902 | 	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0); | 
| Steven Rostedt | ef5580d | 2009-02-27 19:38:04 -0500 | [diff] [blame] | 903 | } | 
 | 904 |  | 
/*
 * trace_function - record one function-call event into @tr.
 * @ip:        instruction pointer of the traced function
 * @parent_ip: instruction pointer of its caller
 * @flags:     irq flags for the common header
 * @pc:        preempt count for the common header
 *
 * Silently drops the event while this cpu's tracing is disabled or the
 * ring buffer refuses the reservation.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event);
}
 | 926 |  | 
| Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 927 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
/*
 * __trace_graph_entry - record a function-graph entry event.
 *
 * Writes to the global trace buffer (the @tr argument is not used for
 * the buffer itself). Returns 1 when the event was recorded, 0 when
 * tracing is disabled on this cpu or the reservation failed.
 */
static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ent_entry *entry;

	/* This cpu may be reading the buffer; do not write into it. */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return 0;

	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event);

	return 1;
}
 | 949 |  | 
/*
 * __trace_graph_return - record a function-graph return event.
 *
 * Writes to the global trace buffer. Drops the event silently when
 * tracing is disabled on this cpu or the reservation fails.
 */
static void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *entry;

	/* This cpu may be reading the buffer; do not write into it. */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event);
}
 | 969 | #endif | 
 | 970 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 971 | void | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 972 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 973 |        unsigned long ip, unsigned long parent_ip, unsigned long flags, | 
 | 974 |        int pc) | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 975 | { | 
 | 976 | 	if (likely(!atomic_read(&data->disabled))) | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 977 | 		trace_function(tr, ip, parent_ip, flags, pc); | 
| Ingo Molnar | 2e0f576 | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 978 | } | 
 | 979 |  | 
/*
 * __ftrace_trace_stack - record a kernel stack trace entry into @tr.
 * @skip: number of top stack frames to omit from the capture
 *
 * Compiles to a no-op when CONFIG_STACKTRACE is off. The trace entries
 * are written straight into the reserved ring buffer event.
 */
static void __ftrace_trace_stack(struct trace_array *tr,
				 unsigned long flags,
				 int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;

	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	/* Zero the frame array so unused slots are well-defined. */
	memset(&entry->caller, 0, sizeof(entry->caller));

	/* Capture directly into the event's caller[] array. */
	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= skip;
	trace.entries		= entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event);
#endif
}
 | 1005 |  | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1006 | static void ftrace_trace_stack(struct trace_array *tr, | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1007 | 			       unsigned long flags, | 
 | 1008 | 			       int skip, int pc) | 
 | 1009 | { | 
 | 1010 | 	if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 
 | 1011 | 		return; | 
 | 1012 |  | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1013 | 	__ftrace_trace_stack(tr, flags, skip, pc); | 
| Steven Rostedt | 5361499 | 2009-01-15 19:12:40 -0500 | [diff] [blame] | 1014 | } | 
 | 1015 |  | 
/*
 * __trace_stack - unconditionally record a kernel stack trace into @tr,
 * ignoring the "stacktrace" trace option.
 */
void __trace_stack(struct trace_array *tr,
		   unsigned long flags,
		   int skip, int pc)
{
	__ftrace_trace_stack(tr, flags, skip, pc);
}
 | 1022 |  | 
/*
 * ftrace_trace_userstack - record the current user-space stack trace.
 *
 * Only records when the "userstacktrace" trace option is enabled;
 * compiles to a no-op when CONFIG_STACKTRACE is off.
 */
static void ftrace_trace_userstack(struct trace_array *tr,
				   unsigned long flags, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);

	/* Zero the frame array so unused slots are well-defined. */
	memset(&entry->caller, 0, sizeof(entry->caller));

	/* Capture the user stack directly into the event's caller[]. */
	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	ring_buffer_unlock_commit(tr->buffer, event);
#endif
}
 | 1051 |  | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 1052 | #ifdef UNUSED | 
 | 1053 | static void __trace_userstack(struct trace_array *tr, unsigned long flags) | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1054 | { | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1055 | 	ftrace_trace_userstack(tr, flags, preempt_count()); | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1056 | } | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 1057 | #endif /* UNUSED */ | 
| Török Edwin | 02b6751 | 2008-11-22 13:28:47 +0200 | [diff] [blame] | 1058 |  | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1059 | static void | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1060 | ftrace_trace_special(void *__tr, | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1061 | 		     unsigned long arg1, unsigned long arg2, unsigned long arg3, | 
 | 1062 | 		     int pc) | 
| Ingo Molnar | a4feb834 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 1063 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1064 | 	struct ring_buffer_event *event; | 
| Ingo Molnar | a4feb834 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 1065 | 	struct trace_array *tr = __tr; | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1066 | 	struct special_entry *entry; | 
| Ingo Molnar | a4feb834 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 1067 |  | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1068 | 	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL, | 
 | 1069 | 					  sizeof(*entry), 0, pc); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1070 | 	if (!event) | 
 | 1071 | 		return; | 
 | 1072 | 	entry	= ring_buffer_event_data(event); | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1073 | 	entry->arg1			= arg1; | 
 | 1074 | 	entry->arg2			= arg2; | 
 | 1075 | 	entry->arg3			= arg3; | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1076 | 	trace_buffer_unlock_commit(tr, event, 0, pc); | 
| Ingo Molnar | a4feb834 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 1077 | } | 
 | 1078 |  | 
 | 1079 | void | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1080 | __trace_special(void *__tr, void *__data, | 
 | 1081 | 		unsigned long arg1, unsigned long arg2, unsigned long arg3) | 
 | 1082 | { | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1083 | 	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count()); | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1084 | } | 
 | 1085 |  | 
/*
 * Record a TRACE_CTX (context switch) event: pid/prio/state of both
 * the task being switched out (@prev) and the one switched in (@next).
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)	/* reservation failed; silently drop the event */
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);
	trace_buffer_unlock_commit(tr, event, flags, pc);
}
 | 1109 |  | 
| Ingo Molnar | 5742279 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1110 | void | 
 | 1111 | tracing_sched_wakeup_trace(struct trace_array *tr, | 
| Ingo Molnar | 86387f7 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1112 | 			   struct task_struct *wakee, | 
 | 1113 | 			   struct task_struct *curr, | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1114 | 			   unsigned long flags, int pc) | 
| Ingo Molnar | 5742279 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1115 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1116 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1117 | 	struct ctx_switch_entry *entry; | 
| Ingo Molnar | 5742279 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1118 |  | 
| Arnaldo Carvalho de Melo | 51a763d | 2009-02-05 16:14:13 -0200 | [diff] [blame] | 1119 | 	event = trace_buffer_lock_reserve(tr, TRACE_WAKE, | 
 | 1120 | 					  sizeof(*entry), flags, pc); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1121 | 	if (!event) | 
 | 1122 | 		return; | 
 | 1123 | 	entry	= ring_buffer_event_data(event); | 
| Steven Rostedt | 777e208 | 2008-09-29 23:02:42 -0400 | [diff] [blame] | 1124 | 	entry->prev_pid			= curr->pid; | 
 | 1125 | 	entry->prev_prio		= curr->prio; | 
 | 1126 | 	entry->prev_state		= curr->state; | 
 | 1127 | 	entry->next_pid			= wakee->pid; | 
 | 1128 | 	entry->next_prio		= wakee->prio; | 
 | 1129 | 	entry->next_state		= wakee->state; | 
 | 1130 | 	entry->next_cpu			= task_cpu(wakee); | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 1131 |  | 
 | 1132 | 	ring_buffer_unlock_commit(tr->buffer, event); | 
 | 1133 | 	ftrace_trace_stack(tr, flags, 6, pc); | 
 | 1134 | 	ftrace_trace_userstack(tr, flags, pc); | 
| Ingo Molnar | 5742279 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1135 | } | 
 | 1136 |  | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1137 | void | 
 | 1138 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | 
 | 1139 | { | 
 | 1140 | 	struct trace_array *tr = &global_trace; | 
 | 1141 | 	struct trace_array_cpu *data; | 
| Steven Rostedt | 5aa1ba6 | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 1142 | 	unsigned long flags; | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1143 | 	int cpu; | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1144 | 	int pc; | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1145 |  | 
| Steven Rostedt | c76f069 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1146 | 	if (tracing_disabled) | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1147 | 		return; | 
 | 1148 |  | 
| Steven Rostedt | 3869705 | 2008-10-01 13:14:09 -0400 | [diff] [blame] | 1149 | 	pc = preempt_count(); | 
| Steven Rostedt | 5aa1ba6 | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 1150 | 	local_irq_save(flags); | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1151 | 	cpu = raw_smp_processor_id(); | 
 | 1152 | 	data = tr->data[cpu]; | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1153 |  | 
| Steven Rostedt | 5aa1ba6 | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 1154 | 	if (likely(atomic_inc_return(&data->disabled) == 1)) | 
| Arnaldo Carvalho de Melo | 7be4215 | 2009-02-05 01:13:37 -0500 | [diff] [blame] | 1155 | 		ftrace_trace_special(tr, arg1, arg2, arg3, pc); | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1156 |  | 
| Steven Rostedt | 5aa1ba6 | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 1157 | 	atomic_dec(&data->disabled); | 
 | 1158 | 	local_irq_restore(flags); | 
| Steven Rostedt | 4902f88 | 2008-05-22 00:22:18 -0400 | [diff] [blame] | 1159 | } | 
 | 1160 |  | 
| Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1161 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
| Steven Rostedt | e49dc19 | 2008-12-02 23:50:05 -0500 | [diff] [blame] | 1162 | int trace_graph_entry(struct ftrace_graph_ent *trace) | 
| Frederic Weisbecker | 15e6cb3 | 2008-11-11 07:14:25 +0100 | [diff] [blame] | 1163 | { | 
 | 1164 | 	struct trace_array *tr = &global_trace; | 
 | 1165 | 	struct trace_array_cpu *data; | 
 | 1166 | 	unsigned long flags; | 
 | 1167 | 	long disabled; | 
| Frederic Weisbecker | 1618536 | 2009-03-23 22:17:01 +0100 | [diff] [blame] | 1168 | 	int ret; | 
| Frederic Weisbecker | 15e6cb3 | 2008-11-11 07:14:25 +0100 | [diff] [blame] | 1169 | 	int cpu; | 
 | 1170 | 	int pc; | 
 | 1171 |  | 
| Steven Rostedt | 804a685 | 2008-12-03 15:36:59 -0500 | [diff] [blame] | 1172 | 	if (!ftrace_trace_task(current)) | 
 | 1173 | 		return 0; | 
 | 1174 |  | 
| Steven Rostedt | ea4e2bc | 2008-12-03 15:36:57 -0500 | [diff] [blame] | 1175 | 	if (!ftrace_graph_addr(trace->func)) | 
 | 1176 | 		return 0; | 
 | 1177 |  | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 1178 | 	local_irq_save(flags); | 
| Frederic Weisbecker | 15e6cb3 | 2008-11-11 07:14:25 +0100 | [diff] [blame] | 1179 | 	cpu = raw_smp_processor_id(); | 
 | 1180 | 	data = tr->data[cpu]; | 
 | 1181 | 	disabled = atomic_inc_return(&data->disabled); | 
 | 1182 | 	if (likely(disabled == 1)) { | 
 | 1183 | 		pc = preempt_count(); | 
| Frederic Weisbecker | 1618536 | 2009-03-23 22:17:01 +0100 | [diff] [blame] | 1184 | 		ret = __trace_graph_entry(tr, trace, flags, pc); | 
 | 1185 | 	} else { | 
 | 1186 | 		ret = 0; | 
| Frederic Weisbecker | 287b6e6 | 2008-11-26 00:57:25 +0100 | [diff] [blame] | 1187 | 	} | 
| Steven Rostedt | ea4e2bc | 2008-12-03 15:36:57 -0500 | [diff] [blame] | 1188 | 	/* Only do the atomic if it is not already set */ | 
 | 1189 | 	if (!test_tsk_trace_graph(current)) | 
 | 1190 | 		set_tsk_trace_graph(current); | 
| Frederic Weisbecker | 1618536 | 2009-03-23 22:17:01 +0100 | [diff] [blame] | 1191 |  | 
| Frederic Weisbecker | 287b6e6 | 2008-11-26 00:57:25 +0100 | [diff] [blame] | 1192 | 	atomic_dec(&data->disabled); | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 1193 | 	local_irq_restore(flags); | 
| Steven Rostedt | e49dc19 | 2008-12-02 23:50:05 -0500 | [diff] [blame] | 1194 |  | 
| Frederic Weisbecker | 1618536 | 2009-03-23 22:17:01 +0100 | [diff] [blame] | 1195 | 	return ret; | 
| Frederic Weisbecker | 287b6e6 | 2008-11-26 00:57:25 +0100 | [diff] [blame] | 1196 | } | 
 | 1197 |  | 
/*
 * Hook run on return from a traced function by the function graph
 * tracer; mirrors trace_graph_entry().
 */
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	/* Back at the root of the call graph: stop marking this task. */
	if (!trace->depth)
		clear_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
| Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1220 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 
| Frederic Weisbecker | 15e6cb3 | 2008-11-11 07:14:25 +0100 | [diff] [blame] | 1221 |  | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1222 |  | 
 | 1223 | /** | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1224 |  * trace_vbprintk - write binary msg to tracing buffer | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1225 |  * | 
 | 1226 |  */ | 
| Steven Rostedt | 40ce74f | 2009-03-19 14:03:53 -0400 | [diff] [blame] | 1227 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1228 | { | 
| Steven Rostedt | 80370cb | 2009-03-10 17:16:35 -0400 | [diff] [blame] | 1229 | 	static raw_spinlock_t trace_buf_lock = | 
 | 1230 | 		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1231 | 	static u32 trace_buf[TRACE_BUF_SIZE]; | 
 | 1232 |  | 
 | 1233 | 	struct ring_buffer_event *event; | 
 | 1234 | 	struct trace_array *tr = &global_trace; | 
 | 1235 | 	struct trace_array_cpu *data; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1236 | 	struct bprint_entry *entry; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1237 | 	unsigned long flags; | 
 | 1238 | 	int resched; | 
 | 1239 | 	int cpu, len = 0, size, pc; | 
 | 1240 |  | 
 | 1241 | 	if (unlikely(tracing_selftest_running || tracing_disabled)) | 
 | 1242 | 		return 0; | 
 | 1243 |  | 
 | 1244 | 	/* Don't pollute graph traces with trace_vprintk internals */ | 
 | 1245 | 	pause_graph_tracing(); | 
 | 1246 |  | 
 | 1247 | 	pc = preempt_count(); | 
 | 1248 | 	resched = ftrace_preempt_disable(); | 
 | 1249 | 	cpu = raw_smp_processor_id(); | 
 | 1250 | 	data = tr->data[cpu]; | 
 | 1251 |  | 
 | 1252 | 	if (unlikely(atomic_read(&data->disabled))) | 
 | 1253 | 		goto out; | 
 | 1254 |  | 
| Steven Rostedt | 80370cb | 2009-03-10 17:16:35 -0400 | [diff] [blame] | 1255 | 	/* Lockdep uses trace_printk for lock tracing */ | 
 | 1256 | 	local_irq_save(flags); | 
 | 1257 | 	__raw_spin_lock(&trace_buf_lock); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1258 | 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 
 | 1259 |  | 
 | 1260 | 	if (len > TRACE_BUF_SIZE || len < 0) | 
 | 1261 | 		goto out_unlock; | 
 | 1262 |  | 
 | 1263 | 	size = sizeof(*entry) + sizeof(u32) * len; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1264 | 	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1265 | 	if (!event) | 
 | 1266 | 		goto out_unlock; | 
 | 1267 | 	entry = ring_buffer_event_data(event); | 
 | 1268 | 	entry->ip			= ip; | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1269 | 	entry->fmt			= fmt; | 
 | 1270 |  | 
 | 1271 | 	memcpy(entry->buf, trace_buf, sizeof(u32) * len); | 
 | 1272 | 	ring_buffer_unlock_commit(tr->buffer, event); | 
 | 1273 |  | 
 | 1274 | out_unlock: | 
| Steven Rostedt | 80370cb | 2009-03-10 17:16:35 -0400 | [diff] [blame] | 1275 | 	__raw_spin_unlock(&trace_buf_lock); | 
 | 1276 | 	local_irq_restore(flags); | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1277 |  | 
 | 1278 | out: | 
 | 1279 | 	ftrace_preempt_enable(resched); | 
 | 1280 | 	unpause_graph_tracing(); | 
 | 1281 |  | 
 | 1282 | 	return len; | 
 | 1283 | } | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1284 | EXPORT_SYMBOL_GPL(trace_vbprintk); | 
 | 1285 |  | 
| Steven Rostedt | 40ce74f | 2009-03-19 14:03:53 -0400 | [diff] [blame] | 1286 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1287 | { | 
 | 1288 | 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; | 
 | 1289 | 	static char trace_buf[TRACE_BUF_SIZE]; | 
 | 1290 |  | 
 | 1291 | 	struct ring_buffer_event *event; | 
 | 1292 | 	struct trace_array *tr = &global_trace; | 
 | 1293 | 	struct trace_array_cpu *data; | 
 | 1294 | 	int cpu, len = 0, size, pc; | 
 | 1295 | 	struct print_entry *entry; | 
 | 1296 | 	unsigned long irq_flags; | 
 | 1297 |  | 
 | 1298 | 	if (tracing_disabled || tracing_selftest_running) | 
 | 1299 | 		return 0; | 
 | 1300 |  | 
 | 1301 | 	pc = preempt_count(); | 
 | 1302 | 	preempt_disable_notrace(); | 
 | 1303 | 	cpu = raw_smp_processor_id(); | 
 | 1304 | 	data = tr->data[cpu]; | 
 | 1305 |  | 
 | 1306 | 	if (unlikely(atomic_read(&data->disabled))) | 
 | 1307 | 		goto out; | 
 | 1308 |  | 
 | 1309 | 	pause_graph_tracing(); | 
 | 1310 | 	raw_local_irq_save(irq_flags); | 
 | 1311 | 	__raw_spin_lock(&trace_buf_lock); | 
 | 1312 | 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 
 | 1313 |  | 
 | 1314 | 	len = min(len, TRACE_BUF_SIZE-1); | 
 | 1315 | 	trace_buf[len] = 0; | 
 | 1316 |  | 
 | 1317 | 	size = sizeof(*entry) + len + 1; | 
 | 1318 | 	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc); | 
 | 1319 | 	if (!event) | 
 | 1320 | 		goto out_unlock; | 
 | 1321 | 	entry = ring_buffer_event_data(event); | 
 | 1322 | 	entry->ip			= ip; | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1323 |  | 
 | 1324 | 	memcpy(&entry->buf, trace_buf, len); | 
 | 1325 | 	entry->buf[len] = 0; | 
 | 1326 | 	ring_buffer_unlock_commit(tr->buffer, event); | 
 | 1327 |  | 
 | 1328 |  out_unlock: | 
 | 1329 | 	__raw_spin_unlock(&trace_buf_lock); | 
 | 1330 | 	raw_local_irq_restore(irq_flags); | 
 | 1331 | 	unpause_graph_tracing(); | 
 | 1332 |  out: | 
 | 1333 | 	preempt_enable_notrace(); | 
 | 1334 |  | 
 | 1335 | 	return len; | 
 | 1336 | } | 
| Frederic Weisbecker | 769b044 | 2009-03-06 17:21:49 +0100 | [diff] [blame] | 1337 | EXPORT_SYMBOL_GPL(trace_vprintk); | 
 | 1338 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1339 | enum trace_file_type { | 
 | 1340 | 	TRACE_FILE_LAT_FMT	= 1, | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 1341 | 	TRACE_FILE_ANNOTATE	= 2, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1342 | }; | 
 | 1343 |  | 
| Robert Richter | e2ac8ef | 2008-11-12 12:59:32 +0100 | [diff] [blame] | 1344 | static void trace_iterator_increment(struct trace_iterator *iter) | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1345 | { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1346 | 	/* Don't allow ftrace to trace into the ring buffers */ | 
 | 1347 | 	ftrace_disable_cpu(); | 
 | 1348 |  | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1349 | 	iter->idx++; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1350 | 	if (iter->buffer_iter[iter->cpu]) | 
 | 1351 | 		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | 
 | 1352 |  | 
 | 1353 | 	ftrace_enable_cpu(); | 
| Steven Rostedt | 5a90f57 | 2008-09-03 17:42:51 -0400 | [diff] [blame] | 1354 | } | 
 | 1355 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1356 | static struct trace_entry * | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1357 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1358 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1359 | 	struct ring_buffer_event *event; | 
 | 1360 | 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1361 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1362 | 	/* Don't allow ftrace to trace into the ring buffers */ | 
 | 1363 | 	ftrace_disable_cpu(); | 
 | 1364 |  | 
 | 1365 | 	if (buf_iter) | 
 | 1366 | 		event = ring_buffer_iter_peek(buf_iter, ts); | 
 | 1367 | 	else | 
 | 1368 | 		event = ring_buffer_peek(iter->tr->buffer, cpu, ts); | 
 | 1369 |  | 
 | 1370 | 	ftrace_enable_cpu(); | 
 | 1371 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1372 | 	return event ? ring_buffer_event_data(event) : NULL; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1373 | } | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1374 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1375 | static struct trace_entry * | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1376 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1377 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1378 | 	struct ring_buffer *buffer = iter->tr->buffer; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1379 | 	struct trace_entry *ent, *next = NULL; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1380 | 	int cpu_file = iter->cpu_file; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1381 | 	u64 next_ts = 0, ts; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1382 | 	int next_cpu = -1; | 
 | 1383 | 	int cpu; | 
 | 1384 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1385 | 	/* | 
 | 1386 | 	 * If we are in a per_cpu trace file, don't bother by iterating over | 
 | 1387 | 	 * all cpu and peek directly. | 
 | 1388 | 	 */ | 
 | 1389 | 	if (cpu_file > TRACE_PIPE_ALL_CPU) { | 
 | 1390 | 		if (ring_buffer_empty_cpu(buffer, cpu_file)) | 
 | 1391 | 			return NULL; | 
 | 1392 | 		ent = peek_next_entry(iter, cpu_file, ent_ts); | 
 | 1393 | 		if (ent_cpu) | 
 | 1394 | 			*ent_cpu = cpu_file; | 
 | 1395 |  | 
 | 1396 | 		return ent; | 
 | 1397 | 	} | 
 | 1398 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 1399 | 	for_each_tracing_cpu(cpu) { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1400 |  | 
 | 1401 | 		if (ring_buffer_empty_cpu(buffer, cpu)) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1402 | 			continue; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1403 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1404 | 		ent = peek_next_entry(iter, cpu, &ts); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1405 |  | 
| Ingo Molnar | cdd31cd | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1406 | 		/* | 
 | 1407 | 		 * Pick the entry with the smallest timestamp: | 
 | 1408 | 		 */ | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1409 | 		if (ent && (!next || ts < next_ts)) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1410 | 			next = ent; | 
 | 1411 | 			next_cpu = cpu; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1412 | 			next_ts = ts; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1413 | 		} | 
 | 1414 | 	} | 
 | 1415 |  | 
 | 1416 | 	if (ent_cpu) | 
 | 1417 | 		*ent_cpu = next_cpu; | 
 | 1418 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1419 | 	if (ent_ts) | 
 | 1420 | 		*ent_ts = next_ts; | 
 | 1421 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1422 | 	return next; | 
 | 1423 | } | 
 | 1424 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1425 | /* Find the next real entry, without updating the iterator itself */ | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1426 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 
 | 1427 | 					  int *ent_cpu, u64 *ent_ts) | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1428 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1429 | 	return __find_next_entry(iter, ent_cpu, ent_ts); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1430 | } | 
| Ingo Molnar | 8c523a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1431 |  | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1432 | /* Find the next real entry, and increment the iterator to the next entry */ | 
 | 1433 | static void *find_next_entry_inc(struct trace_iterator *iter) | 
 | 1434 | { | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1435 | 	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1436 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1437 | 	if (iter->ent) | 
| Robert Richter | e2ac8ef | 2008-11-12 12:59:32 +0100 | [diff] [blame] | 1438 | 		trace_iterator_increment(iter); | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1439 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1440 | 	return iter->ent ? iter : NULL; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1441 | } | 
 | 1442 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1443 | static void trace_consume(struct trace_iterator *iter) | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1444 | { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1445 | 	/* Don't allow ftrace to trace into the ring buffers */ | 
 | 1446 | 	ftrace_disable_cpu(); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1447 | 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1448 | 	ftrace_enable_cpu(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1449 | } | 
 | 1450 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1451 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1452 | { | 
 | 1453 | 	struct trace_iterator *iter = m->private; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1454 | 	int i = (int)*pos; | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 1455 | 	void *ent; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1456 |  | 
 | 1457 | 	(*pos)++; | 
 | 1458 |  | 
 | 1459 | 	/* can't go backwards */ | 
 | 1460 | 	if (iter->idx > i) | 
 | 1461 | 		return NULL; | 
 | 1462 |  | 
 | 1463 | 	if (iter->idx < 0) | 
 | 1464 | 		ent = find_next_entry_inc(iter); | 
 | 1465 | 	else | 
 | 1466 | 		ent = iter; | 
 | 1467 |  | 
 | 1468 | 	while (ent && iter->idx < i) | 
 | 1469 | 		ent = find_next_entry_inc(iter); | 
 | 1470 |  | 
 | 1471 | 	iter->pos = *pos; | 
 | 1472 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1473 | 	return ent; | 
 | 1474 | } | 
 | 1475 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1476 | /* | 
 | 1477 |  * No necessary locking here. The worst thing which can | 
 | 1478 |  * happen is loosing events consumed at the same time | 
 | 1479 |  * by a trace_pipe reader. | 
 | 1480 |  * Other than that, we don't risk to crash the ring buffer | 
 | 1481 |  * because it serializes the readers. | 
 | 1482 |  * | 
 | 1483 |  * The current tracer is copied to avoid a global locking | 
 | 1484 |  * all around. | 
 | 1485 |  */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1486 | static void *s_start(struct seq_file *m, loff_t *pos) | 
 | 1487 | { | 
 | 1488 | 	struct trace_iterator *iter = m->private; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1489 | 	static struct tracer *old_tracer; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1490 | 	int cpu_file = iter->cpu_file; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1491 | 	void *p = NULL; | 
 | 1492 | 	loff_t l = 0; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1493 | 	int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1494 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1495 | 	/* copy the tracer to avoid using a global lock all around */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1496 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1497 | 	if (unlikely(old_tracer != current_trace && current_trace)) { | 
 | 1498 | 		old_tracer = current_trace; | 
 | 1499 | 		*iter->trace = *current_trace; | 
| Steven Rostedt | d15f57f | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 1500 | 	} | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1501 | 	mutex_unlock(&trace_types_lock); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1502 |  | 
 | 1503 | 	atomic_inc(&trace_record_cmdline_disabled); | 
 | 1504 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1505 | 	if (*pos != iter->pos) { | 
 | 1506 | 		iter->ent = NULL; | 
 | 1507 | 		iter->cpu = 0; | 
 | 1508 | 		iter->idx = -1; | 
 | 1509 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1510 | 		ftrace_disable_cpu(); | 
 | 1511 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1512 | 		if (cpu_file == TRACE_PIPE_ALL_CPU) { | 
 | 1513 | 			for_each_tracing_cpu(cpu) | 
 | 1514 | 				ring_buffer_iter_reset(iter->buffer_iter[cpu]); | 
 | 1515 | 		} else | 
 | 1516 | 			ring_buffer_iter_reset(iter->buffer_iter[cpu_file]); | 
 | 1517 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1518 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1519 | 		ftrace_enable_cpu(); | 
 | 1520 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1521 | 		for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 
 | 1522 | 			; | 
 | 1523 |  | 
 | 1524 | 	} else { | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 1525 | 		l = *pos - 1; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1526 | 		p = s_next(m, p, &l); | 
 | 1527 | 	} | 
 | 1528 |  | 
 | 1529 | 	return p; | 
 | 1530 | } | 
 | 1531 |  | 
/* seq_file ->stop: undo the cmdline-recording disable done in s_start(). */
static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
}
 | 1536 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1537 | static void print_lat_help_header(struct seq_file *m) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1538 | { | 
| Michael Ellerman | a616835 | 2008-08-20 16:36:11 -0700 | [diff] [blame] | 1539 | 	seq_puts(m, "#                  _------=> CPU#            \n"); | 
 | 1540 | 	seq_puts(m, "#                 / _-----=> irqs-off        \n"); | 
 | 1541 | 	seq_puts(m, "#                | / _----=> need-resched    \n"); | 
 | 1542 | 	seq_puts(m, "#                || / _---=> hardirq/softirq \n"); | 
 | 1543 | 	seq_puts(m, "#                ||| / _--=> preempt-depth   \n"); | 
 | 1544 | 	seq_puts(m, "#                |||| /                      \n"); | 
 | 1545 | 	seq_puts(m, "#                |||||     delay             \n"); | 
 | 1546 | 	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n"); | 
 | 1547 | 	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1548 | } | 
 | 1549 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1550 | static void print_func_help_header(struct seq_file *m) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1551 | { | 
| Michael Ellerman | a616835 | 2008-08-20 16:36:11 -0700 | [diff] [blame] | 1552 | 	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n"); | 
 | 1553 | 	seq_puts(m, "#              | |       |          |         |\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1554 | } | 
 | 1555 |  | 
 | 1556 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1557 | static void | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1558 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) | 
 | 1559 | { | 
 | 1560 | 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 
 | 1561 | 	struct trace_array *tr = iter->tr; | 
 | 1562 | 	struct trace_array_cpu *data = tr->data[tr->cpu]; | 
 | 1563 | 	struct tracer *type = current_trace; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1564 | 	unsigned long total; | 
 | 1565 | 	unsigned long entries; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1566 | 	const char *name = "preemption"; | 
 | 1567 |  | 
 | 1568 | 	if (type) | 
 | 1569 | 		name = type->name; | 
 | 1570 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1571 | 	entries = ring_buffer_entries(iter->tr->buffer); | 
 | 1572 | 	total = entries + | 
 | 1573 | 		ring_buffer_overruns(iter->tr->buffer); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1574 |  | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1575 | 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1576 | 		   name, UTS_RELEASE); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1577 | 	seq_puts(m, "# -----------------------------------" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1578 | 		 "---------------------------------\n"); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1579 | 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1580 | 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d", | 
| Steven Rostedt | 57f50be | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 1581 | 		   nsecs_to_usecs(data->saved_latency), | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1582 | 		   entries, | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 1583 | 		   total, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1584 | 		   tr->cpu, | 
 | 1585 | #if defined(CONFIG_PREEMPT_NONE) | 
 | 1586 | 		   "server", | 
 | 1587 | #elif defined(CONFIG_PREEMPT_VOLUNTARY) | 
 | 1588 | 		   "desktop", | 
| Steven Rostedt | b5c21b4 | 2008-07-10 20:58:12 -0400 | [diff] [blame] | 1589 | #elif defined(CONFIG_PREEMPT) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1590 | 		   "preempt", | 
 | 1591 | #else | 
 | 1592 | 		   "unknown", | 
 | 1593 | #endif | 
 | 1594 | 		   /* These are reserved for later use */ | 
 | 1595 | 		   0, 0, 0, 0); | 
 | 1596 | #ifdef CONFIG_SMP | 
 | 1597 | 	seq_printf(m, " #P:%d)\n", num_online_cpus()); | 
 | 1598 | #else | 
 | 1599 | 	seq_puts(m, ")\n"); | 
 | 1600 | #endif | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1601 | 	seq_puts(m, "#    -----------------\n"); | 
 | 1602 | 	seq_printf(m, "#    | task: %.16s-%d " | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1603 | 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", | 
 | 1604 | 		   data->comm, data->pid, data->uid, data->nice, | 
 | 1605 | 		   data->policy, data->rt_priority); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1606 | 	seq_puts(m, "#    -----------------\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1607 |  | 
 | 1608 | 	if (data->critical_start) { | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1609 | 		seq_puts(m, "#  => started at: "); | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1610 | 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); | 
 | 1611 | 		trace_print_seq(m, &iter->seq); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1612 | 		seq_puts(m, "\n#  => ended at:   "); | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1613 | 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 
 | 1614 | 		trace_print_seq(m, &iter->seq); | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1615 | 		seq_puts(m, "#\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1616 | 	} | 
 | 1617 |  | 
| KOSAKI Motohiro | 888b55d | 2009-03-08 13:12:43 +0900 | [diff] [blame] | 1618 | 	seq_puts(m, "#\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1619 | } | 
 | 1620 |  | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1621 | static void test_cpu_buff_start(struct trace_iterator *iter) | 
 | 1622 | { | 
 | 1623 | 	struct trace_seq *s = &iter->seq; | 
 | 1624 |  | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 1625 | 	if (!(trace_flags & TRACE_ITER_ANNOTATE)) | 
 | 1626 | 		return; | 
 | 1627 |  | 
 | 1628 | 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | 
 | 1629 | 		return; | 
 | 1630 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 1631 | 	if (cpumask_test_cpu(iter->cpu, iter->started)) | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1632 | 		return; | 
 | 1633 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 1634 | 	cpumask_set_cpu(iter->cpu, iter->started); | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 1635 |  | 
 | 1636 | 	/* Don't print started cpu buffer for the first entry of the trace */ | 
 | 1637 | 	if (iter->idx > 1) | 
 | 1638 | 		trace_seq_printf(s, "##### CPU %u buffer started ####\n", | 
 | 1639 | 				iter->cpu); | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1640 | } | 
 | 1641 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1642 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1643 | { | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1644 | 	struct trace_seq *s = &iter->seq; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1645 | 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 1646 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1647 | 	struct trace_event *event; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1648 |  | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 1649 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1650 |  | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 1651 | 	test_cpu_buff_start(iter); | 
 | 1652 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1653 | 	event = ftrace_find_event(entry->type); | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1654 |  | 
 | 1655 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| Steven Rostedt | 27d48be | 2009-03-04 21:57:29 -0500 | [diff] [blame] | 1656 | 		if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 
 | 1657 | 			if (!trace_print_lat_context(iter)) | 
 | 1658 | 				goto partial; | 
 | 1659 | 		} else { | 
 | 1660 | 			if (!trace_print_context(iter)) | 
 | 1661 | 				goto partial; | 
 | 1662 | 		} | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1663 | 	} | 
 | 1664 |  | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 1665 | 	if (event) | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1666 | 		return event->trace(iter, sym_flags); | 
 | 1667 |  | 
 | 1668 | 	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 
 | 1669 | 		goto partial; | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 1670 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1671 | 	return TRACE_TYPE_HANDLED; | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1672 | partial: | 
 | 1673 | 	return TRACE_TYPE_PARTIAL_LINE; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1674 | } | 
 | 1675 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1676 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1677 | { | 
 | 1678 | 	struct trace_seq *s = &iter->seq; | 
 | 1679 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1680 | 	struct trace_event *event; | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1681 |  | 
 | 1682 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1683 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1684 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1685 | 		if (!trace_seq_printf(s, "%d %d %llu ", | 
 | 1686 | 				      entry->pid, iter->cpu, iter->ts)) | 
 | 1687 | 			goto partial; | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1688 | 	} | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1689 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1690 | 	event = ftrace_find_event(entry->type); | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 1691 | 	if (event) | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1692 | 		return event->raw(iter, 0); | 
 | 1693 |  | 
 | 1694 | 	if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 
 | 1695 | 		goto partial; | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 1696 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1697 | 	return TRACE_TYPE_HANDLED; | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1698 | partial: | 
 | 1699 | 	return TRACE_TYPE_PARTIAL_LINE; | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1700 | } | 
 | 1701 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1702 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1703 | { | 
 | 1704 | 	struct trace_seq *s = &iter->seq; | 
 | 1705 | 	unsigned char newline = '\n'; | 
 | 1706 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1707 | 	struct trace_event *event; | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1708 |  | 
 | 1709 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1710 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1711 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
 | 1712 | 		SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 
 | 1713 | 		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 
 | 1714 | 		SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 
 | 1715 | 	} | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1716 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1717 | 	event = ftrace_find_event(entry->type); | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 1718 | 	if (event) { | 
| Arnaldo Carvalho de Melo | ae7462b | 2009-02-03 22:05:50 -0200 | [diff] [blame] | 1719 | 		enum print_line_t ret = event->hex(iter, 0); | 
| Arnaldo Carvalho de Melo | d9793bd | 2009-02-03 20:20:41 -0200 | [diff] [blame] | 1720 | 		if (ret != TRACE_TYPE_HANDLED) | 
 | 1721 | 			return ret; | 
 | 1722 | 	} | 
| Steven Rostedt | 7104f30 | 2008-10-01 10:52:51 -0400 | [diff] [blame] | 1723 |  | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1724 | 	SEQ_PUT_FIELD_RET(s, newline); | 
 | 1725 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1726 | 	return TRACE_TYPE_HANDLED; | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1727 | } | 
 | 1728 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1729 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1730 | { | 
 | 1731 | 	struct trace_seq *s = &iter->seq; | 
 | 1732 | 	struct trace_entry *entry; | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1733 | 	struct trace_event *event; | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1734 |  | 
 | 1735 | 	entry = iter->ent; | 
| Steven Rostedt | dd0e545 | 2008-08-01 12:26:41 -0400 | [diff] [blame] | 1736 |  | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1737 | 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
 | 1738 | 		SEQ_PUT_FIELD_RET(s, entry->pid); | 
| Steven Rostedt | 1830b52 | 2009-02-07 19:38:43 -0500 | [diff] [blame] | 1739 | 		SEQ_PUT_FIELD_RET(s, iter->cpu); | 
| Frederic Weisbecker | c4a8e8b | 2009-02-02 20:29:21 -0200 | [diff] [blame] | 1740 | 		SEQ_PUT_FIELD_RET(s, iter->ts); | 
 | 1741 | 	} | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1742 |  | 
| Steven Rostedt | f633cef | 2008-12-23 23:24:13 -0500 | [diff] [blame] | 1743 | 	event = ftrace_find_event(entry->type); | 
| Arnaldo Carvalho de Melo | 268ccda | 2009-02-04 20:16:39 -0200 | [diff] [blame] | 1744 | 	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1745 | } | 
 | 1746 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1747 | static int trace_empty(struct trace_iterator *iter) | 
 | 1748 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1749 | 	int cpu; | 
 | 1750 |  | 
| Steven Rostedt | 9aba60f | 2009-03-11 19:52:30 -0400 | [diff] [blame] | 1751 | 	/* If we are looking at one CPU buffer, only check that one */ | 
 | 1752 | 	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { | 
 | 1753 | 		cpu = iter->cpu_file; | 
 | 1754 | 		if (iter->buffer_iter[cpu]) { | 
 | 1755 | 			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 
 | 1756 | 				return 0; | 
 | 1757 | 		} else { | 
 | 1758 | 			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 
 | 1759 | 				return 0; | 
 | 1760 | 		} | 
 | 1761 | 		return 1; | 
 | 1762 | 	} | 
 | 1763 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 1764 | 	for_each_tracing_cpu(cpu) { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1765 | 		if (iter->buffer_iter[cpu]) { | 
 | 1766 | 			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 
 | 1767 | 				return 0; | 
 | 1768 | 		} else { | 
 | 1769 | 			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | 
 | 1770 | 				return 0; | 
 | 1771 | 		} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1772 | 	} | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1773 |  | 
| Frederic Weisbecker | 797d371 | 2008-09-30 18:13:45 +0200 | [diff] [blame] | 1774 | 	return 1; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1775 | } | 
 | 1776 |  | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1777 | static enum print_line_t print_trace_line(struct trace_iterator *iter) | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1778 | { | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 1779 | 	enum print_line_t ret; | 
 | 1780 |  | 
 | 1781 | 	if (iter->trace && iter->trace->print_line) { | 
 | 1782 | 		ret = iter->trace->print_line(iter); | 
 | 1783 | 		if (ret != TRACE_TYPE_UNHANDLED) | 
 | 1784 | 			return ret; | 
 | 1785 | 	} | 
| Thomas Gleixner | 72829bc | 2008-05-23 21:37:28 +0200 | [diff] [blame] | 1786 |  | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1787 | 	if (iter->ent->type == TRACE_BPRINT && | 
 | 1788 | 			trace_flags & TRACE_ITER_PRINTK && | 
 | 1789 | 			trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 
| Steven Rostedt | 5ef841f | 2009-03-19 12:20:38 -0400 | [diff] [blame] | 1790 | 		return trace_print_bprintk_msg_only(iter); | 
| Frederic Weisbecker | 48ead02 | 2009-03-12 18:24:49 +0100 | [diff] [blame] | 1791 |  | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 1792 | 	if (iter->ent->type == TRACE_PRINT && | 
 | 1793 | 			trace_flags & TRACE_ITER_PRINTK && | 
 | 1794 | 			trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 
| Steven Rostedt | 5ef841f | 2009-03-19 12:20:38 -0400 | [diff] [blame] | 1795 | 		return trace_print_printk_msg_only(iter); | 
| Frederic Weisbecker | 66896a8 | 2008-12-13 20:18:13 +0100 | [diff] [blame] | 1796 |  | 
| Ingo Molnar | cb0f12a | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1797 | 	if (trace_flags & TRACE_ITER_BIN) | 
 | 1798 | 		return print_bin_fmt(iter); | 
 | 1799 |  | 
| Ingo Molnar | 5e3ca0e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 1800 | 	if (trace_flags & TRACE_ITER_HEX) | 
 | 1801 | 		return print_hex_fmt(iter); | 
 | 1802 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1803 | 	if (trace_flags & TRACE_ITER_RAW) | 
 | 1804 | 		return print_raw_fmt(iter); | 
 | 1805 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1806 | 	return print_trace_fmt(iter); | 
 | 1807 | } | 
 | 1808 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1809 | static int s_show(struct seq_file *m, void *v) | 
 | 1810 | { | 
 | 1811 | 	struct trace_iterator *iter = v; | 
 | 1812 |  | 
 | 1813 | 	if (iter->ent == NULL) { | 
 | 1814 | 		if (iter->tr) { | 
 | 1815 | 			seq_printf(m, "# tracer: %s\n", iter->trace->name); | 
 | 1816 | 			seq_puts(m, "#\n"); | 
 | 1817 | 		} | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 1818 | 		if (iter->trace && iter->trace->print_header) | 
 | 1819 | 			iter->trace->print_header(m); | 
 | 1820 | 		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1821 | 			/* print nothing if the buffers are empty */ | 
 | 1822 | 			if (trace_empty(iter)) | 
 | 1823 | 				return 0; | 
 | 1824 | 			print_trace_header(m, iter); | 
 | 1825 | 			if (!(trace_flags & TRACE_ITER_VERBOSE)) | 
 | 1826 | 				print_lat_help_header(m); | 
 | 1827 | 		} else { | 
 | 1828 | 			if (!(trace_flags & TRACE_ITER_VERBOSE)) | 
 | 1829 | 				print_func_help_header(m); | 
 | 1830 | 		} | 
 | 1831 | 	} else { | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 1832 | 		print_trace_line(iter); | 
| Steven Rostedt | 214023c | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1833 | 		trace_print_seq(m, &iter->seq); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1834 | 	} | 
 | 1835 |  | 
 | 1836 | 	return 0; | 
 | 1837 | } | 
 | 1838 |  | 
 | 1839 | static struct seq_operations tracer_seq_ops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 1840 | 	.start		= s_start, | 
 | 1841 | 	.next		= s_next, | 
 | 1842 | 	.stop		= s_stop, | 
 | 1843 | 	.show		= s_show, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1844 | }; | 
 | 1845 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 1846 | static struct trace_iterator * | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1847 | __tracing_open(struct inode *inode, struct file *file) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1848 | { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1849 | 	long cpu_file = (long) inode->i_private; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1850 | 	void *fail_ret = ERR_PTR(-ENOMEM); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1851 | 	struct trace_iterator *iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1852 | 	struct seq_file *m; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1853 | 	int cpu, ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1854 |  | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1855 | 	if (tracing_disabled) | 
 | 1856 | 		return ERR_PTR(-ENODEV); | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 1857 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1858 | 	iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1859 | 	if (!iter) | 
 | 1860 | 		return ERR_PTR(-ENOMEM); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1861 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1862 | 	/* | 
 | 1863 | 	 * We make a copy of the current tracer to avoid concurrent | 
 | 1864 | 	 * changes on it while we are reading. | 
 | 1865 | 	 */ | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1866 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1867 | 	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1868 | 	if (!iter->trace) | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1869 | 		goto fail; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1870 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1871 | 	if (current_trace) | 
 | 1872 | 		*iter->trace = *current_trace; | 
 | 1873 |  | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 1874 | 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) | 
 | 1875 | 		goto fail; | 
 | 1876 |  | 
 | 1877 | 	cpumask_clear(iter->started); | 
 | 1878 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1879 | 	if (current_trace && current_trace->print_max) | 
 | 1880 | 		iter->tr = &max_tr; | 
 | 1881 | 	else | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1882 | 		iter->tr = &global_trace; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1883 | 	iter->pos = -1; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1884 | 	mutex_init(&iter->mutex); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1885 | 	iter->cpu_file = cpu_file; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1886 |  | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 1887 | 	/* Notify the tracer early; before we stop tracing. */ | 
 | 1888 | 	if (iter->trace && iter->trace->open) | 
| Markus Metzger | a93751c | 2008-12-11 13:53:26 +0100 | [diff] [blame] | 1889 | 		iter->trace->open(iter); | 
| Markus Metzger | 8bba1bf | 2008-11-25 09:12:31 +0100 | [diff] [blame] | 1890 |  | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 1891 | 	/* Annotate start of buffers if we had overruns */ | 
 | 1892 | 	if (ring_buffer_overruns(iter->tr->buffer)) | 
 | 1893 | 		iter->iter_flags |= TRACE_FILE_ANNOTATE; | 
 | 1894 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1895 | 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 
 | 1896 | 		for_each_tracing_cpu(cpu) { | 
| Steven Rostedt | 12ef7d4 | 2008-11-12 17:52:38 -0500 | [diff] [blame] | 1897 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1898 | 			iter->buffer_iter[cpu] = | 
 | 1899 | 				ring_buffer_read_start(iter->tr->buffer, cpu); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1900 | 		} | 
 | 1901 | 	} else { | 
 | 1902 | 		cpu = iter->cpu_file; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1903 | 		iter->buffer_iter[cpu] = | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 1904 | 				ring_buffer_read_start(iter->tr->buffer, cpu); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1905 | 	} | 
 | 1906 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1907 | 	/* TODO stop tracer */ | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1908 | 	ret = seq_open(file, &tracer_seq_ops); | 
 | 1909 | 	if (ret < 0) { | 
 | 1910 | 		fail_ret = ERR_PTR(ret); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1911 | 		goto fail_buffer; | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1912 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1913 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1914 | 	m = file->private_data; | 
 | 1915 | 	m->private = iter; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1916 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1917 | 	/* stop the trace while dumping */ | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1918 | 	tracing_stop(); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1919 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1920 | 	mutex_unlock(&trace_types_lock); | 
 | 1921 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1922 | 	return iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1923 |  | 
 | 1924 |  fail_buffer: | 
 | 1925 | 	for_each_tracing_cpu(cpu) { | 
 | 1926 | 		if (iter->buffer_iter[cpu]) | 
 | 1927 | 			ring_buffer_read_finish(iter->buffer_iter[cpu]); | 
 | 1928 | 	} | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 1929 | 	free_cpumask_var(iter->started); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1930 |  fail: | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1931 | 	mutex_unlock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1932 | 	kfree(iter->trace); | 
| Julia Lawall | 0bb943c | 2008-11-14 19:05:31 +0100 | [diff] [blame] | 1933 | 	kfree(iter); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1934 |  | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1935 | 	return fail_ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1936 | } | 
 | 1937 |  | 
 | 1938 | int tracing_open_generic(struct inode *inode, struct file *filp) | 
 | 1939 | { | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 1940 | 	if (tracing_disabled) | 
 | 1941 | 		return -ENODEV; | 
 | 1942 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1943 | 	filp->private_data = inode->i_private; | 
 | 1944 | 	return 0; | 
 | 1945 | } | 
 | 1946 |  | 
| Hannes Eder | 4fd2735 | 2009-02-10 19:44:12 +0100 | [diff] [blame] | 1947 | static int tracing_release(struct inode *inode, struct file *file) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1948 | { | 
 | 1949 | 	struct seq_file *m = (struct seq_file *)file->private_data; | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 1950 | 	struct trace_iterator *iter; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1951 | 	int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1952 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 1953 | 	if (!(file->f_mode & FMODE_READ)) | 
 | 1954 | 		return 0; | 
 | 1955 |  | 
 | 1956 | 	iter = m->private; | 
 | 1957 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1958 | 	mutex_lock(&trace_types_lock); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 1959 | 	for_each_tracing_cpu(cpu) { | 
 | 1960 | 		if (iter->buffer_iter[cpu]) | 
 | 1961 | 			ring_buffer_read_finish(iter->buffer_iter[cpu]); | 
 | 1962 | 	} | 
 | 1963 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1964 | 	if (iter->trace && iter->trace->close) | 
 | 1965 | 		iter->trace->close(iter); | 
 | 1966 |  | 
 | 1967 | 	/* reenable tracing if it was previously enabled */ | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 1968 | 	tracing_start(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1969 | 	mutex_unlock(&trace_types_lock); | 
 | 1970 |  | 
 | 1971 | 	seq_release(inode, file); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1972 | 	mutex_destroy(&iter->mutex); | 
| Frederic Weisbecker | b0dfa97 | 2009-04-01 22:53:08 +0200 | [diff] [blame] | 1973 | 	free_cpumask_var(iter->started); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 1974 | 	kfree(iter->trace); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1975 | 	kfree(iter); | 
 | 1976 | 	return 0; | 
 | 1977 | } | 
 | 1978 |  | 
 | 1979 | static int tracing_open(struct inode *inode, struct file *file) | 
 | 1980 | { | 
| Steven Rostedt | 85a2f9b | 2009-02-27 00:12:38 -0500 | [diff] [blame] | 1981 | 	struct trace_iterator *iter; | 
 | 1982 | 	int ret = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1983 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 1984 | 	/* If this file was open for write, then erase contents */ | 
 | 1985 | 	if ((file->f_mode & FMODE_WRITE) && | 
 | 1986 | 	    !(file->f_flags & O_APPEND)) { | 
 | 1987 | 		long cpu = (long) inode->i_private; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1988 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 1989 | 		if (cpu == TRACE_PIPE_ALL_CPU) | 
 | 1990 | 			tracing_reset_online_cpus(&global_trace); | 
 | 1991 | 		else | 
 | 1992 | 			tracing_reset(&global_trace, cpu); | 
 | 1993 | 	} | 
 | 1994 |  | 
 | 1995 | 	if (file->f_mode & FMODE_READ) { | 
 | 1996 | 		iter = __tracing_open(inode, file); | 
 | 1997 | 		if (IS_ERR(iter)) | 
 | 1998 | 			ret = PTR_ERR(iter); | 
 | 1999 | 		else if (trace_flags & TRACE_ITER_LATENCY_FMT) | 
 | 2000 | 			iter->iter_flags |= TRACE_FILE_LAT_FMT; | 
 | 2001 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2002 | 	return ret; | 
 | 2003 | } | 
 | 2004 |  | 
| Ingo Molnar | e309b41 | 2008-05-12 21:20:51 +0200 | [diff] [blame] | 2005 | static void * | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2006 | t_next(struct seq_file *m, void *v, loff_t *pos) | 
 | 2007 | { | 
 | 2008 | 	struct tracer *t = m->private; | 
 | 2009 |  | 
 | 2010 | 	(*pos)++; | 
 | 2011 |  | 
 | 2012 | 	if (t) | 
 | 2013 | 		t = t->next; | 
 | 2014 |  | 
 | 2015 | 	m->private = t; | 
 | 2016 |  | 
 | 2017 | 	return t; | 
 | 2018 | } | 
 | 2019 |  | 
 | 2020 | static void *t_start(struct seq_file *m, loff_t *pos) | 
 | 2021 | { | 
 | 2022 | 	struct tracer *t = m->private; | 
 | 2023 | 	loff_t l = 0; | 
 | 2024 |  | 
 | 2025 | 	mutex_lock(&trace_types_lock); | 
 | 2026 | 	for (; t && l < *pos; t = t_next(m, t, &l)) | 
 | 2027 | 		; | 
 | 2028 |  | 
 | 2029 | 	return t; | 
 | 2030 | } | 
 | 2031 |  | 
/* seq_file ->stop: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
 | 2036 |  | 
 | 2037 | static int t_show(struct seq_file *m, void *v) | 
 | 2038 | { | 
 | 2039 | 	struct tracer *t = v; | 
 | 2040 |  | 
 | 2041 | 	if (!t) | 
 | 2042 | 		return 0; | 
 | 2043 |  | 
 | 2044 | 	seq_printf(m, "%s", t->name); | 
 | 2045 | 	if (t->next) | 
 | 2046 | 		seq_putc(m, ' '); | 
 | 2047 | 	else | 
 | 2048 | 		seq_putc(m, '\n'); | 
 | 2049 |  | 
 | 2050 | 	return 0; | 
 | 2051 | } | 
 | 2052 |  | 
/* seq_file iteration over the registered tracer list (trace_types). */
static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
 | 2059 |  | 
 | 2060 | static int show_traces_open(struct inode *inode, struct file *file) | 
 | 2061 | { | 
 | 2062 | 	int ret; | 
 | 2063 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 2064 | 	if (tracing_disabled) | 
 | 2065 | 		return -ENODEV; | 
 | 2066 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2067 | 	ret = seq_open(file, &show_traces_seq_ops); | 
 | 2068 | 	if (!ret) { | 
 | 2069 | 		struct seq_file *m = file->private_data; | 
 | 2070 | 		m->private = trace_types; | 
 | 2071 | 	} | 
 | 2072 |  | 
 | 2073 | 	return ret; | 
 | 2074 | } | 
 | 2075 |  | 
/*
 * Writes to the "trace" file have no per-byte effect (the open path
 * already performed the truncating reset); claim the whole count so
 * the writer does not retry.
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
 | 2082 |  | 
/* File operations for the "trace" debugfs file (seq_file based). */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};
 | 2090 |  | 
/* File operations for "available_tracers" (read-only seq_file). */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};
 | 2096 |  | 
/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline); filled under
 * tracing_cpumask_update_lock by the read handler:
 */
static char mask_str[NR_CPUS + 1];
 | 2113 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2114 | static ssize_t | 
 | 2115 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | 
 | 2116 | 		     size_t count, loff_t *ppos) | 
 | 2117 | { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2118 | 	int len; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2119 |  | 
 | 2120 | 	mutex_lock(&tracing_cpumask_update_lock); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2121 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2122 | 	len = cpumask_scnprintf(mask_str, count, tracing_cpumask); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2123 | 	if (count - len < 2) { | 
 | 2124 | 		count = -EINVAL; | 
 | 2125 | 		goto out_err; | 
 | 2126 | 	} | 
 | 2127 | 	len += sprintf(mask_str + len, "\n"); | 
 | 2128 | 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); | 
 | 2129 |  | 
 | 2130 | out_err: | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2131 | 	mutex_unlock(&tracing_cpumask_update_lock); | 
 | 2132 |  | 
 | 2133 | 	return count; | 
 | 2134 | } | 
 | 2135 |  | 
 | 2136 | static ssize_t | 
 | 2137 | tracing_cpumask_write(struct file *filp, const char __user *ubuf, | 
 | 2138 | 		      size_t count, loff_t *ppos) | 
 | 2139 | { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2140 | 	int err, cpu; | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2141 | 	cpumask_var_t tracing_cpumask_new; | 
 | 2142 |  | 
 | 2143 | 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) | 
 | 2144 | 		return -ENOMEM; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2145 |  | 
 | 2146 | 	mutex_lock(&tracing_cpumask_update_lock); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2147 | 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2148 | 	if (err) | 
 | 2149 | 		goto err_unlock; | 
 | 2150 |  | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 2151 | 	local_irq_disable(); | 
| Steven Rostedt | 92205c2 | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 2152 | 	__raw_spin_lock(&ftrace_max_lock); | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2153 | 	for_each_tracing_cpu(cpu) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2154 | 		/* | 
 | 2155 | 		 * Increase/decrease the disabled counter if we are | 
 | 2156 | 		 * about to flip a bit in the cpumask: | 
 | 2157 | 		 */ | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2158 | 		if (cpumask_test_cpu(cpu, tracing_cpumask) && | 
 | 2159 | 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2160 | 			atomic_inc(&global_trace.data[cpu]->disabled); | 
 | 2161 | 		} | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2162 | 		if (!cpumask_test_cpu(cpu, tracing_cpumask) && | 
 | 2163 | 				cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2164 | 			atomic_dec(&global_trace.data[cpu]->disabled); | 
 | 2165 | 		} | 
 | 2166 | 	} | 
| Steven Rostedt | 92205c2 | 2008-05-12 21:20:55 +0200 | [diff] [blame] | 2167 | 	__raw_spin_unlock(&ftrace_max_lock); | 
| Steven Rostedt | a5e2588 | 2008-12-02 15:34:05 -0500 | [diff] [blame] | 2168 | 	local_irq_enable(); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2169 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2170 | 	cpumask_copy(tracing_cpumask, tracing_cpumask_new); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2171 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2172 | 	mutex_unlock(&tracing_cpumask_update_lock); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2173 | 	free_cpumask_var(tracing_cpumask_new); | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2174 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2175 | 	return count; | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2176 |  | 
 | 2177 | err_unlock: | 
 | 2178 | 	mutex_unlock(&tracing_cpumask_update_lock); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2179 | 	free_cpumask_var(tracing_cpumask); | 
| Ingo Molnar | 36dfe92 | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2180 |  | 
 | 2181 | 	return err; | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 2182 | } | 
 | 2183 |  | 
/* File operations for the "tracing_cpumask" debugfs file. */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
};
 | 2189 |  | 
/*
 * Read handler for "trace_options": emit one option per line, covering
 * both the global trace_options[] flags and the current tracer's
 * private opts; a cleared option is printed with a "no" prefix.
 */
static ssize_t
tracing_trace_options_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct tracer_opt *trace_opts;
	u32 tracer_flags;
	int len = 0;
	char *buf;
	int r = 0;
	int i;


	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and newline */
	}

	/* Snapshot the current tracer's flags under the lock. */
	mutex_lock(&trace_types_lock);
	tracer_flags = current_trace->flags->val;
	trace_opts = current_trace->flags->opts;

	/*
	 * Increase the size with names of options specific
	 * of the current tracer.
	 */
	for (i = 0; trace_opts[i].name; i++) {
		len += strlen(trace_opts[i].name);
		len += 3; /* "no" and newline */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&trace_types_lock);
		return -ENOMEM;
	}

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s\n", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			r += sprintf(buf + r, "%s\n",
				trace_opts[i].name);
		else
			r += sprintf(buf + r, "no%s\n",
				trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	/* r must stay within the size computed above. */
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	kfree(buf);
	return r;
}
 | 2252 |  | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2253 | /* Try to assign a tracer specific option */ | 
 | 2254 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 
 | 2255 | { | 
 | 2256 | 	struct tracer_flags *trace_flags = trace->flags; | 
 | 2257 | 	struct tracer_opt *opts = NULL; | 
 | 2258 | 	int ret = 0, i = 0; | 
 | 2259 | 	int len; | 
 | 2260 |  | 
 | 2261 | 	for (i = 0; trace_flags->opts[i].name; i++) { | 
 | 2262 | 		opts = &trace_flags->opts[i]; | 
 | 2263 | 		len = strlen(opts->name); | 
 | 2264 |  | 
 | 2265 | 		if (strncmp(cmp, opts->name, len) == 0) { | 
 | 2266 | 			ret = trace->set_flag(trace_flags->val, | 
 | 2267 | 				opts->bit, !neg); | 
 | 2268 | 			break; | 
 | 2269 | 		} | 
 | 2270 | 	} | 
 | 2271 | 	/* Not found */ | 
 | 2272 | 	if (!trace_flags->opts[i].name) | 
 | 2273 | 		return -EINVAL; | 
 | 2274 |  | 
 | 2275 | 	/* Refused to handle */ | 
 | 2276 | 	if (ret) | 
 | 2277 | 		return ret; | 
 | 2278 |  | 
 | 2279 | 	if (neg) | 
 | 2280 | 		trace_flags->val &= ~opts->bit; | 
 | 2281 | 	else | 
 | 2282 | 		trace_flags->val |= opts->bit; | 
 | 2283 |  | 
 | 2284 | 	return 0; | 
 | 2285 | } | 
 | 2286 |  | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2287 | static void set_tracer_flags(unsigned int mask, int enabled) | 
 | 2288 | { | 
 | 2289 | 	/* do nothing if flag is already set */ | 
 | 2290 | 	if (!!(trace_flags & mask) == !!enabled) | 
 | 2291 | 		return; | 
 | 2292 |  | 
 | 2293 | 	if (enabled) | 
 | 2294 | 		trace_flags |= mask; | 
 | 2295 | 	else | 
 | 2296 | 		trace_flags &= ~mask; | 
 | 2297 |  | 
 | 2298 | 	if (mask == TRACE_ITER_GLOBAL_CLK) { | 
 | 2299 | 		u64 (*func)(void); | 
 | 2300 |  | 
 | 2301 | 		if (enabled) | 
 | 2302 | 			func = trace_clock_global; | 
 | 2303 | 		else | 
 | 2304 | 			func = trace_clock_local; | 
 | 2305 |  | 
 | 2306 | 		mutex_lock(&trace_types_lock); | 
 | 2307 | 		ring_buffer_set_clock(global_trace.buffer, func); | 
 | 2308 |  | 
 | 2309 | 		if (max_tr.buffer) | 
 | 2310 | 			ring_buffer_set_clock(max_tr.buffer, func); | 
 | 2311 | 		mutex_unlock(&trace_types_lock); | 
 | 2312 | 	} | 
 | 2313 | } | 
 | 2314 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2315 | static ssize_t | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 2316 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2317 | 			size_t cnt, loff_t *ppos) | 
 | 2318 | { | 
 | 2319 | 	char buf[64]; | 
 | 2320 | 	char *cmp = buf; | 
 | 2321 | 	int neg = 0; | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2322 | 	int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2323 | 	int i; | 
 | 2324 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2325 | 	if (cnt >= sizeof(buf)) | 
 | 2326 | 		return -EINVAL; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2327 |  | 
 | 2328 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 2329 | 		return -EFAULT; | 
 | 2330 |  | 
 | 2331 | 	buf[cnt] = 0; | 
 | 2332 |  | 
 | 2333 | 	if (strncmp(buf, "no", 2) == 0) { | 
 | 2334 | 		neg = 1; | 
 | 2335 | 		cmp += 2; | 
 | 2336 | 	} | 
 | 2337 |  | 
 | 2338 | 	for (i = 0; trace_options[i]; i++) { | 
 | 2339 | 		int len = strlen(trace_options[i]); | 
 | 2340 |  | 
 | 2341 | 		if (strncmp(cmp, trace_options[i], len) == 0) { | 
| Steven Rostedt | af4617b | 2009-03-17 18:09:55 -0400 | [diff] [blame] | 2342 | 			set_tracer_flags(1 << i, !neg); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2343 | 			break; | 
 | 2344 | 		} | 
 | 2345 | 	} | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2346 |  | 
 | 2347 | 	/* If no option could be set, test the specific tracer options */ | 
 | 2348 | 	if (!trace_options[i]) { | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2349 | 		mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2350 | 		ret = set_tracer_option(current_trace, cmp, neg); | 
| Steven Rostedt | d8e83d2 | 2009-02-26 23:55:58 -0500 | [diff] [blame] | 2351 | 		mutex_unlock(&trace_types_lock); | 
| Frederic Weisbecker | adf9f19 | 2008-11-17 19:23:42 +0100 | [diff] [blame] | 2352 | 		if (ret) | 
 | 2353 | 			return ret; | 
 | 2354 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2355 |  | 
 | 2356 | 	filp->f_pos += cnt; | 
 | 2357 |  | 
 | 2358 | 	return cnt; | 
 | 2359 | } | 
 | 2360 |  | 
/* File operations for the "trace_options" debugfs file. */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_trace_options_read,
	.write		= tracing_trace_options_write,
};
 | 2366 |  | 
/* Short usage HOWTO served verbatim by the "README" debugfs file. */
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"nop\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/trace_options\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;
 | 2385 |  | 
/* Copy the static readme_msg text out to user-space. */
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}
 | 2393 |  | 
/* File operations for the read-only "README" debugfs file. */
static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};
 | 2398 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2399 | static ssize_t | 
 | 2400 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | 
 | 2401 | 		  size_t cnt, loff_t *ppos) | 
 | 2402 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2403 | 	char buf[64]; | 
 | 2404 | 	int r; | 
 | 2405 |  | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2406 | 	r = sprintf(buf, "%u\n", tracer_enabled); | 
| Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 2407 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2408 | } | 
 | 2409 |  | 
 | 2410 | static ssize_t | 
 | 2411 | tracing_ctrl_write(struct file *filp, const char __user *ubuf, | 
 | 2412 | 		   size_t cnt, loff_t *ppos) | 
 | 2413 | { | 
 | 2414 | 	struct trace_array *tr = filp->private_data; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2415 | 	char buf[64]; | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 2416 | 	unsigned long val; | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2417 | 	int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2418 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2419 | 	if (cnt >= sizeof(buf)) | 
 | 2420 | 		return -EINVAL; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2421 |  | 
 | 2422 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 2423 | 		return -EFAULT; | 
 | 2424 |  | 
 | 2425 | 	buf[cnt] = 0; | 
 | 2426 |  | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2427 | 	ret = strict_strtoul(buf, 10, &val); | 
 | 2428 | 	if (ret < 0) | 
 | 2429 | 		return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2430 |  | 
 | 2431 | 	val = !!val; | 
 | 2432 |  | 
 | 2433 | 	mutex_lock(&trace_types_lock); | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2434 | 	if (tracer_enabled ^ val) { | 
 | 2435 | 		if (val) { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2436 | 			tracer_enabled = 1; | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2437 | 			if (current_trace->start) | 
 | 2438 | 				current_trace->start(tr); | 
 | 2439 | 			tracing_start(); | 
 | 2440 | 		} else { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2441 | 			tracer_enabled = 0; | 
| Steven Rostedt | 9036990 | 2008-11-05 16:05:44 -0500 | [diff] [blame] | 2442 | 			tracing_stop(); | 
 | 2443 | 			if (current_trace->stop) | 
 | 2444 | 				current_trace->stop(tr); | 
 | 2445 | 		} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2446 | 	} | 
 | 2447 | 	mutex_unlock(&trace_types_lock); | 
 | 2448 |  | 
 | 2449 | 	filp->f_pos += cnt; | 
 | 2450 |  | 
 | 2451 | 	return cnt; | 
 | 2452 | } | 
 | 2453 |  | 
 | 2454 | static ssize_t | 
 | 2455 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 
 | 2456 | 		       size_t cnt, loff_t *ppos) | 
 | 2457 | { | 
 | 2458 | 	char buf[max_tracer_type_len+2]; | 
 | 2459 | 	int r; | 
 | 2460 |  | 
 | 2461 | 	mutex_lock(&trace_types_lock); | 
 | 2462 | 	if (current_trace) | 
 | 2463 | 		r = sprintf(buf, "%s\n", current_trace->name); | 
 | 2464 | 	else | 
 | 2465 | 		r = sprintf(buf, "\n"); | 
 | 2466 | 	mutex_unlock(&trace_types_lock); | 
 | 2467 |  | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2468 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2469 | } | 
 | 2470 |  | 
/*
 * tracer_init - reset the online-CPU buffers and run the tracer's init hook
 * @t: tracer about to be started
 * @tr: trace array the tracer will run on
 *
 * Returns whatever t->init() returns (0 on success).
 */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	return t->init(tr);
}
 | 2476 |  | 
/*
 * Resize both the main and the max (snapshot) ring buffers to @size.
 * On partial failure the main buffer is rolled back so the two stay
 * the same size; if even the rollback fails, tracing is killed.
 */
static int tracing_resize_ring_buffer(unsigned long size)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = 1;

	ret = ring_buffer_resize(global_trace.buffer, size);
	if (ret < 0)
		return ret;

	ret = ring_buffer_resize(max_tr.buffer, size);
	if (ret < 0) {
		int r;

		/* Roll the main buffer back so both remain equal-sized. */
		r = ring_buffer_resize(global_trace.buffer,
				       global_trace.entries);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	global_trace.entries = size;

	return ret;
}
 | 2523 |  | 
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 *
 * Returns 0 on success (or when already expanded), or the negative
 * error from tracing_resize_ring_buffer() on failure.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = tracing_resize_ring_buffer(trace_buf_size);
	mutex_unlock(&trace_types_lock);

	return ret;
}
 | 2545 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 2546 | struct trace_option_dentry; | 
 | 2547 |  | 
 | 2548 | static struct trace_option_dentry * | 
 | 2549 | create_trace_option_files(struct tracer *tracer); | 
 | 2550 |  | 
 | 2551 | static void | 
 | 2552 | destroy_trace_option_files(struct trace_option_dentry *topts); | 
 | 2553 |  | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 2554 | static int tracing_set_tracer(const char *buf) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2555 | { | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 2556 | 	static struct trace_option_dentry *topts; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2557 | 	struct trace_array *tr = &global_trace; | 
 | 2558 | 	struct tracer *t; | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 2559 | 	int ret = 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2560 |  | 
| Steven Rostedt | 1027fcb | 2009-03-12 11:33:20 -0400 | [diff] [blame] | 2561 | 	mutex_lock(&trace_types_lock); | 
 | 2562 |  | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2563 | 	if (!ring_buffer_expanded) { | 
 | 2564 | 		ret = tracing_resize_ring_buffer(trace_buf_size); | 
 | 2565 | 		if (ret < 0) | 
| Frederic Weisbecker | 59f586d | 2009-03-15 22:10:39 +0100 | [diff] [blame] | 2566 | 			goto out; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 2567 | 		ret = 0; | 
 | 2568 | 	} | 
 | 2569 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2570 | 	for (t = trace_types; t; t = t->next) { | 
 | 2571 | 		if (strcmp(t->name, buf) == 0) | 
 | 2572 | 			break; | 
 | 2573 | 	} | 
| Frederic Weisbecker | c2931e0 | 2008-10-04 22:04:44 +0200 | [diff] [blame] | 2574 | 	if (!t) { | 
 | 2575 | 		ret = -EINVAL; | 
 | 2576 | 		goto out; | 
 | 2577 | 	} | 
 | 2578 | 	if (t == current_trace) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2579 | 		goto out; | 
 | 2580 |  | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 2581 | 	trace_branch_disable(); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2582 | 	if (current_trace && current_trace->reset) | 
 | 2583 | 		current_trace->reset(tr); | 
 | 2584 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 2585 | 	destroy_trace_option_files(topts); | 
 | 2586 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2587 | 	current_trace = t; | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 2588 |  | 
 | 2589 | 	topts = create_trace_option_files(current_trace); | 
 | 2590 |  | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 2591 | 	if (t->init) { | 
| Arnaldo Carvalho de Melo | b6f11df | 2009-02-05 18:02:00 -0200 | [diff] [blame] | 2592 | 		ret = tracer_init(t, tr); | 
| Frederic Weisbecker | 1c80025 | 2008-11-16 05:57:26 +0100 | [diff] [blame] | 2593 | 		if (ret) | 
 | 2594 | 			goto out; | 
 | 2595 | 	} | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2596 |  | 
| Steven Rostedt | 9f029e8 | 2008-11-12 15:24:24 -0500 | [diff] [blame] | 2597 | 	trace_branch_enable(tr); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2598 |  out: | 
 | 2599 | 	mutex_unlock(&trace_types_lock); | 
 | 2600 |  | 
| Peter Zijlstra | d9e5407 | 2008-11-01 19:57:37 +0100 | [diff] [blame] | 2601 | 	return ret; | 
 | 2602 | } | 
 | 2603 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2604 | static ssize_t | 
 | 2605 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 
 | 2606 | 			size_t cnt, loff_t *ppos) | 
 | 2607 | { | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2608 | 	char buf[max_tracer_type_len+1]; | 
 | 2609 | 	int i; | 
 | 2610 | 	size_t ret; | 
| Frederic Weisbecker | e6e7a65 | 2008-11-16 05:53:19 +0100 | [diff] [blame] | 2611 | 	int err; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2612 |  | 
| Steven Rostedt | 60063a6 | 2008-10-28 10:44:24 -0400 | [diff] [blame] | 2613 | 	ret = cnt; | 
 | 2614 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2615 | 	if (cnt > max_tracer_type_len) | 
 | 2616 | 		cnt = max_tracer_type_len; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2617 |  | 
 | 2618 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 2619 | 		return -EFAULT; | 
 | 2620 |  | 
 | 2621 | 	buf[cnt] = 0; | 
 | 2622 |  | 
 | 2623 | 	/* strip ending whitespace. */ | 
 | 2624 | 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 
 | 2625 | 		buf[i] = 0; | 
 | 2626 |  | 
| Frederic Weisbecker | e6e7a65 | 2008-11-16 05:53:19 +0100 | [diff] [blame] | 2627 | 	err = tracing_set_tracer(buf); | 
 | 2628 | 	if (err) | 
 | 2629 | 		return err; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2630 |  | 
| Frederic Weisbecker | e6e7a65 | 2008-11-16 05:53:19 +0100 | [diff] [blame] | 2631 | 	filp->f_pos += ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2632 |  | 
| Frederic Weisbecker | c2931e0 | 2008-10-04 22:04:44 +0200 | [diff] [blame] | 2633 | 	return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2634 | } | 
 | 2635 |  | 
 | 2636 | static ssize_t | 
 | 2637 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | 
 | 2638 | 		     size_t cnt, loff_t *ppos) | 
 | 2639 | { | 
 | 2640 | 	unsigned long *ptr = filp->private_data; | 
 | 2641 | 	char buf[64]; | 
 | 2642 | 	int r; | 
 | 2643 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2644 | 	r = snprintf(buf, sizeof(buf), "%ld\n", | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2645 | 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2646 | 	if (r > sizeof(buf)) | 
 | 2647 | 		r = sizeof(buf); | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2648 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2649 | } | 
 | 2650 |  | 
 | 2651 | static ssize_t | 
 | 2652 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 
 | 2653 | 		      size_t cnt, loff_t *ppos) | 
 | 2654 | { | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 2655 | 	unsigned long *ptr = filp->private_data; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2656 | 	char buf[64]; | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 2657 | 	unsigned long val; | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2658 | 	int ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2659 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2660 | 	if (cnt >= sizeof(buf)) | 
 | 2661 | 		return -EINVAL; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2662 |  | 
 | 2663 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 2664 | 		return -EFAULT; | 
 | 2665 |  | 
 | 2666 | 	buf[cnt] = 0; | 
 | 2667 |  | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2668 | 	ret = strict_strtoul(buf, 10, &val); | 
 | 2669 | 	if (ret < 0) | 
 | 2670 | 		return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 2671 |  | 
 | 2672 | 	*ptr = val * 1000; | 
 | 2673 |  | 
 | 2674 | 	return cnt; | 
 | 2675 | } | 
 | 2676 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2677 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 
 | 2678 | { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2679 | 	long cpu_file = (long) inode->i_private; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2680 | 	struct trace_iterator *iter; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2681 | 	int ret = 0; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2682 |  | 
 | 2683 | 	if (tracing_disabled) | 
 | 2684 | 		return -ENODEV; | 
 | 2685 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2686 | 	mutex_lock(&trace_types_lock); | 
 | 2687 |  | 
 | 2688 | 	/* We only allow one reader per cpu */ | 
 | 2689 | 	if (cpu_file == TRACE_PIPE_ALL_CPU) { | 
 | 2690 | 		if (!cpumask_empty(tracing_reader_cpumask)) { | 
 | 2691 | 			ret = -EBUSY; | 
 | 2692 | 			goto out; | 
 | 2693 | 		} | 
 | 2694 | 		cpumask_setall(tracing_reader_cpumask); | 
 | 2695 | 	} else { | 
 | 2696 | 		if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) | 
 | 2697 | 			cpumask_set_cpu(cpu_file, tracing_reader_cpumask); | 
 | 2698 | 		else { | 
 | 2699 | 			ret = -EBUSY; | 
 | 2700 | 			goto out; | 
 | 2701 | 		} | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2702 | 	} | 
 | 2703 |  | 
 | 2704 | 	/* create a buffer to store the information to pass to userspace */ | 
 | 2705 | 	iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2706 | 	if (!iter) { | 
 | 2707 | 		ret = -ENOMEM; | 
 | 2708 | 		goto out; | 
 | 2709 | 	} | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2710 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2711 | 	/* | 
 | 2712 | 	 * We make a copy of the current tracer to avoid concurrent | 
 | 2713 | 	 * changes on it while we are reading. | 
 | 2714 | 	 */ | 
 | 2715 | 	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); | 
 | 2716 | 	if (!iter->trace) { | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2717 | 		ret = -ENOMEM; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2718 | 		goto fail; | 
 | 2719 | 	} | 
 | 2720 | 	if (current_trace) | 
 | 2721 | 		*iter->trace = *current_trace; | 
 | 2722 |  | 
 | 2723 | 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 
 | 2724 | 		ret = -ENOMEM; | 
 | 2725 | 		goto fail; | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 2726 | 	} | 
 | 2727 |  | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 2728 | 	/* trace pipe does not show start of buffer */ | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 2729 | 	cpumask_setall(iter->started); | 
| Steven Rostedt | a309720 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 2730 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2731 | 	iter->cpu_file = cpu_file; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2732 | 	iter->tr = &global_trace; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2733 | 	mutex_init(&iter->mutex); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2734 | 	filp->private_data = iter; | 
 | 2735 |  | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2736 | 	if (iter->trace->pipe_open) | 
 | 2737 | 		iter->trace->pipe_open(iter); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2738 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2739 | out: | 
 | 2740 | 	mutex_unlock(&trace_types_lock); | 
 | 2741 | 	return ret; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2742 |  | 
 | 2743 | fail: | 
 | 2744 | 	kfree(iter->trace); | 
 | 2745 | 	kfree(iter); | 
 | 2746 | 	mutex_unlock(&trace_types_lock); | 
 | 2747 | 	return ret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2748 | } | 
 | 2749 |  | 
 | 2750 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 
 | 2751 | { | 
 | 2752 | 	struct trace_iterator *iter = file->private_data; | 
 | 2753 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 2754 | 	mutex_lock(&trace_types_lock); | 
 | 2755 |  | 
 | 2756 | 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) | 
 | 2757 | 		cpumask_clear(tracing_reader_cpumask); | 
 | 2758 | 	else | 
 | 2759 | 		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | 
 | 2760 |  | 
 | 2761 | 	mutex_unlock(&trace_types_lock); | 
 | 2762 |  | 
| Rusty Russell | 4462344 | 2009-01-01 10:12:23 +1030 | [diff] [blame] | 2763 | 	free_cpumask_var(iter->started); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2764 | 	mutex_destroy(&iter->mutex); | 
 | 2765 | 	kfree(iter->trace); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2766 | 	kfree(iter); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2767 |  | 
 | 2768 | 	return 0; | 
 | 2769 | } | 
 | 2770 |  | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2771 | static unsigned int | 
 | 2772 | tracing_poll_pipe(struct file *filp, poll_table *poll_table) | 
 | 2773 | { | 
 | 2774 | 	struct trace_iterator *iter = filp->private_data; | 
 | 2775 |  | 
 | 2776 | 	if (trace_flags & TRACE_ITER_BLOCK) { | 
 | 2777 | 		/* | 
 | 2778 | 		 * Always select as readable when in blocking mode | 
 | 2779 | 		 */ | 
 | 2780 | 		return POLLIN | POLLRDNORM; | 
| Ingo Molnar | afc2abc | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 2781 | 	} else { | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 2782 | 		if (!trace_empty(iter)) | 
 | 2783 | 			return POLLIN | POLLRDNORM; | 
 | 2784 | 		poll_wait(filp, &trace_wait, poll_table); | 
 | 2785 | 		if (!trace_empty(iter)) | 
 | 2786 | 			return POLLIN | POLLRDNORM; | 
 | 2787 |  | 
 | 2788 | 		return 0; | 
 | 2789 | 	} | 
 | 2790 | } | 
 | 2791 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 2792 |  | 
/*
 * Default wait_pipe implementation: sleep on trace_wait until woken.
 *
 * The order here matters: prepare_to_wait() must run before the
 * trace_empty() check so that a wakeup arriving between the check and
 * schedule() is not lost (the wakeup sets us back to TASK_RUNNING and
 * schedule() then returns promptly).
 */
void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	/* only sleep while there is still nothing to read */
	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}
 | 2804 |  | 
 | 2805 | /* | 
 | 2806 |  * This is a make-shift waitqueue. | 
 | 2807 |  * A tracer might use this callback on some rare cases: | 
 | 2808 |  * | 
 | 2809 |  *  1) the current tracer might hold the runqueue lock when it wakes up | 
 | 2810 |  *     a reader, hence a deadlock (sched, function, and function graph tracers) | 
 | 2811 |  *  2) the function tracers, trace all functions, we don't want | 
 | 2812 |  *     the overhead of calling wake_up and friends | 
 | 2813 |  *     (and tracing them too) | 
 | 2814 |  * | 
 | 2815 |  *     Anyway, this is really very primitive wakeup. | 
 | 2816 |  */ | 
 | 2817 | void poll_wait_pipe(struct trace_iterator *iter) | 
 | 2818 | { | 
 | 2819 | 	set_current_state(TASK_INTERRUPTIBLE); | 
 | 2820 | 	/* sleep for 100 msecs, and try again. */ | 
 | 2821 | 	schedule_timeout(HZ / 10); | 
 | 2822 | } | 
 | 2823 |  | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2824 | /* Must be called with trace_types_lock mutex held. */ | 
 | 2825 | static int tracing_wait_pipe(struct file *filp) | 
 | 2826 | { | 
 | 2827 | 	struct trace_iterator *iter = filp->private_data; | 
 | 2828 |  | 
 | 2829 | 	while (trace_empty(iter)) { | 
 | 2830 |  | 
 | 2831 | 		if ((filp->f_flags & O_NONBLOCK)) { | 
 | 2832 | 			return -EAGAIN; | 
 | 2833 | 		} | 
 | 2834 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2835 | 		mutex_unlock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2836 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 2837 | 		iter->trace->wait_pipe(iter); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2838 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2839 | 		mutex_lock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2840 |  | 
| Frederic Weisbecker | 6eaaa5d | 2009-02-11 02:25:00 +0100 | [diff] [blame] | 2841 | 		if (signal_pending(current)) | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2842 | 			return -EINTR; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2843 |  | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2844 | 		/* | 
 | 2845 | 		 * We block until we read something and tracing is disabled. | 
 | 2846 | 		 * We still block if tracing is disabled, but we have never | 
 | 2847 | 		 * read anything. This allows a user to cat this file, and | 
 | 2848 | 		 * then enable tracing. But after we have read something, | 
 | 2849 | 		 * we give an EOF when tracing is again disabled. | 
 | 2850 | 		 * | 
 | 2851 | 		 * iter->pos will be 0 if we haven't read anything. | 
 | 2852 | 		 */ | 
 | 2853 | 		if (!tracer_enabled && iter->pos) | 
 | 2854 | 			break; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2855 | 	} | 
 | 2856 |  | 
 | 2857 | 	return 1; | 
 | 2858 | } | 
 | 2859 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2860 | /* | 
 | 2861 |  * Consumer reader. | 
 | 2862 |  */ | 
 | 2863 | static ssize_t | 
 | 2864 | tracing_read_pipe(struct file *filp, char __user *ubuf, | 
 | 2865 | 		  size_t cnt, loff_t *ppos) | 
 | 2866 | { | 
 | 2867 | 	struct trace_iterator *iter = filp->private_data; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2868 | 	static struct tracer *old_tracer; | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 2869 | 	ssize_t sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2870 |  | 
 | 2871 | 	/* return any leftover data */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 2872 | 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 
 | 2873 | 	if (sret != -EBUSY) | 
 | 2874 | 		return sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2875 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 2876 | 	trace_seq_init(&iter->seq); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2877 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2878 | 	/* copy the tracer to avoid using a global lock all around */ | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2879 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2880 | 	if (unlikely(old_tracer != current_trace && current_trace)) { | 
 | 2881 | 		old_tracer = current_trace; | 
 | 2882 | 		*iter->trace = *current_trace; | 
 | 2883 | 	} | 
 | 2884 | 	mutex_unlock(&trace_types_lock); | 
 | 2885 |  | 
 | 2886 | 	/* | 
 | 2887 | 	 * Avoid more than one consumer on a single file descriptor | 
 | 2888 | 	 * This is just a matter of traces coherency, the ring buffer itself | 
 | 2889 | 	 * is protected. | 
 | 2890 | 	 */ | 
 | 2891 | 	mutex_lock(&iter->mutex); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2892 | 	if (iter->trace->read) { | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 2893 | 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | 
 | 2894 | 		if (sret) | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2895 | 			goto out; | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2896 | 	} | 
 | 2897 |  | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 2898 | waitagain: | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2899 | 	sret = tracing_wait_pipe(filp); | 
 | 2900 | 	if (sret <= 0) | 
 | 2901 | 		goto out; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2902 |  | 
 | 2903 | 	/* stop when tracing is finished */ | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2904 | 	if (trace_empty(iter)) { | 
 | 2905 | 		sret = 0; | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2906 | 		goto out; | 
| Eduard - Gabriel Munteanu | ff98781 | 2009-02-09 08:15:55 +0200 | [diff] [blame] | 2907 | 	} | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2908 |  | 
 | 2909 | 	if (cnt >= PAGE_SIZE) | 
 | 2910 | 		cnt = PAGE_SIZE - 1; | 
 | 2911 |  | 
| Steven Rostedt | 53d0aa7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2912 | 	/* reset all but tr, trace, and overruns */ | 
| Steven Rostedt | 53d0aa7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2913 | 	memset(&iter->seq, 0, | 
 | 2914 | 	       sizeof(struct trace_iterator) - | 
 | 2915 | 	       offsetof(struct trace_iterator, seq)); | 
| Steven Rostedt | 4823ed7 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2916 | 	iter->pos = -1; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2917 |  | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 2918 | 	while (find_next_entry_inc(iter) != NULL) { | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2919 | 		enum print_line_t ret; | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 2920 | 		int len = iter->seq.len; | 
 | 2921 |  | 
| Ingo Molnar | f9896bf | 2008-05-12 21:20:47 +0200 | [diff] [blame] | 2922 | 		ret = print_trace_line(iter); | 
| Frederic Weisbecker | 2c4f035 | 2008-09-29 20:18:34 +0200 | [diff] [blame] | 2923 | 		if (ret == TRACE_TYPE_PARTIAL_LINE) { | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 2924 | 			/* don't print partial lines */ | 
 | 2925 | 			iter->seq.len = len; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2926 | 			break; | 
| Steven Rostedt | 088b1e42 | 2008-05-12 21:20:48 +0200 | [diff] [blame] | 2927 | 		} | 
| Frederic Weisbecker | b91facc | 2009-02-06 18:30:44 +0100 | [diff] [blame] | 2928 | 		if (ret != TRACE_TYPE_NO_CONSUME) | 
 | 2929 | 			trace_consume(iter); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2930 |  | 
 | 2931 | 		if (iter->seq.len >= cnt) | 
 | 2932 | 			break; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2933 | 	} | 
 | 2934 |  | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2935 | 	/* Now copy what we have to the user */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 2936 | 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 
 | 2937 | 	if (iter->seq.readpos >= iter->seq.len) | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 2938 | 		trace_seq_init(&iter->seq); | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 2939 |  | 
 | 2940 | 	/* | 
 | 2941 | 	 * If there was nothing to send to user, inspite of consuming trace | 
 | 2942 | 	 * entries, go back to wait for more entries. | 
 | 2943 | 	 */ | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 2944 | 	if (sret == -EBUSY) | 
| Pekka Paalanen | 9ff4b97 | 2008-09-29 20:23:48 +0200 | [diff] [blame] | 2945 | 		goto waitagain; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2946 |  | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2947 | out: | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 2948 | 	mutex_unlock(&iter->mutex); | 
| Steven Rostedt | 107bad8 | 2008-05-12 21:21:01 +0200 | [diff] [blame] | 2949 |  | 
| Pekka Paalanen | 6c6c279 | 2008-05-12 21:21:02 +0200 | [diff] [blame] | 2950 | 	return sret; | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 2951 | } | 
 | 2952 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 2953 | static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, | 
 | 2954 | 				     struct pipe_buffer *buf) | 
 | 2955 | { | 
 | 2956 | 	__free_page(buf->page); | 
 | 2957 | } | 
 | 2958 |  | 
/* splice_pipe_desc release: free a page that was never handed to the pipe. */
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}
 | 2964 |  | 
/* Pipe buffer ops for spliced trace pages: generic helpers plus a
 * release that frees the page (allocated in tracing_splice_read_pipe). */
static struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= tracing_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};
 | 2974 |  | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 2975 | static size_t | 
| Frederic Weisbecker | fa7c7f6 | 2009-02-11 02:51:30 +0100 | [diff] [blame] | 2976 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 2977 | { | 
 | 2978 | 	size_t count; | 
 | 2979 | 	int ret; | 
 | 2980 |  | 
 | 2981 | 	/* Seq buffer is page-sized, exactly what we need. */ | 
 | 2982 | 	for (;;) { | 
 | 2983 | 		count = iter->seq.len; | 
 | 2984 | 		ret = print_trace_line(iter); | 
 | 2985 | 		count = iter->seq.len - count; | 
 | 2986 | 		if (rem < count) { | 
 | 2987 | 			rem = 0; | 
 | 2988 | 			iter->seq.len -= count; | 
 | 2989 | 			break; | 
 | 2990 | 		} | 
 | 2991 | 		if (ret == TRACE_TYPE_PARTIAL_LINE) { | 
 | 2992 | 			iter->seq.len -= count; | 
 | 2993 | 			break; | 
 | 2994 | 		} | 
 | 2995 |  | 
 | 2996 | 		trace_consume(iter); | 
 | 2997 | 		rem -= count; | 
 | 2998 | 		if (!find_next_entry_inc(iter))	{ | 
 | 2999 | 			rem = 0; | 
 | 3000 | 			iter->ent = NULL; | 
 | 3001 | 			break; | 
 | 3002 | 		} | 
 | 3003 | 	} | 
 | 3004 |  | 
 | 3005 | 	return rem; | 
 | 3006 | } | 
 | 3007 |  | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3008 | static ssize_t tracing_splice_read_pipe(struct file *filp, | 
 | 3009 | 					loff_t *ppos, | 
 | 3010 | 					struct pipe_inode_info *pipe, | 
 | 3011 | 					size_t len, | 
 | 3012 | 					unsigned int flags) | 
 | 3013 | { | 
 | 3014 | 	struct page *pages[PIPE_BUFFERS]; | 
 | 3015 | 	struct partial_page partial[PIPE_BUFFERS]; | 
 | 3016 | 	struct trace_iterator *iter = filp->private_data; | 
 | 3017 | 	struct splice_pipe_desc spd = { | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3018 | 		.pages		= pages, | 
 | 3019 | 		.partial	= partial, | 
 | 3020 | 		.nr_pages	= 0, /* This gets updated below. */ | 
 | 3021 | 		.flags		= flags, | 
 | 3022 | 		.ops		= &tracing_pipe_buf_ops, | 
 | 3023 | 		.spd_release	= tracing_spd_release_pipe, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3024 | 	}; | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3025 | 	static struct tracer *old_tracer; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3026 | 	ssize_t ret; | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3027 | 	size_t rem; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3028 | 	unsigned int i; | 
 | 3029 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3030 | 	/* copy the tracer to avoid using a global lock all around */ | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3031 | 	mutex_lock(&trace_types_lock); | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3032 | 	if (unlikely(old_tracer != current_trace && current_trace)) { | 
 | 3033 | 		old_tracer = current_trace; | 
 | 3034 | 		*iter->trace = *current_trace; | 
 | 3035 | 	} | 
 | 3036 | 	mutex_unlock(&trace_types_lock); | 
 | 3037 |  | 
 | 3038 | 	mutex_lock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3039 |  | 
 | 3040 | 	if (iter->trace->splice_read) { | 
 | 3041 | 		ret = iter->trace->splice_read(iter, filp, | 
 | 3042 | 					       ppos, pipe, len, flags); | 
 | 3043 | 		if (ret) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3044 | 			goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3045 | 	} | 
 | 3046 |  | 
 | 3047 | 	ret = tracing_wait_pipe(filp); | 
 | 3048 | 	if (ret <= 0) | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3049 | 		goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3050 |  | 
 | 3051 | 	if (!iter->ent && !find_next_entry_inc(iter)) { | 
 | 3052 | 		ret = -EFAULT; | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3053 | 		goto out_err; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3054 | 	} | 
 | 3055 |  | 
 | 3056 | 	/* Fill as many pages as possible. */ | 
 | 3057 | 	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { | 
 | 3058 | 		pages[i] = alloc_page(GFP_KERNEL); | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3059 | 		if (!pages[i]) | 
 | 3060 | 			break; | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3061 |  | 
| Frederic Weisbecker | fa7c7f6 | 2009-02-11 02:51:30 +0100 | [diff] [blame] | 3062 | 		rem = tracing_fill_pipe_page(rem, iter); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3063 |  | 
 | 3064 | 		/* Copy the data into the page, so we can start over. */ | 
 | 3065 | 		ret = trace_seq_to_buffer(&iter->seq, | 
 | 3066 | 					  page_address(pages[i]), | 
 | 3067 | 					  iter->seq.len); | 
 | 3068 | 		if (ret < 0) { | 
 | 3069 | 			__free_page(pages[i]); | 
 | 3070 | 			break; | 
 | 3071 | 		} | 
 | 3072 | 		partial[i].offset = 0; | 
 | 3073 | 		partial[i].len = iter->seq.len; | 
 | 3074 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 3075 | 		trace_seq_init(&iter->seq); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3076 | 	} | 
 | 3077 |  | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3078 | 	mutex_unlock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3079 |  | 
 | 3080 | 	spd.nr_pages = i; | 
 | 3081 |  | 
 | 3082 | 	return splice_to_pipe(pipe, &spd); | 
 | 3083 |  | 
| Steven Rostedt | 34cd499 | 2009-02-09 12:06:29 -0500 | [diff] [blame] | 3084 | out_err: | 
| Frederic Weisbecker | d7350c3 | 2009-02-25 06:13:16 +0100 | [diff] [blame] | 3085 | 	mutex_unlock(&iter->mutex); | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3086 |  | 
 | 3087 | 	return ret; | 
 | 3088 | } | 
 | 3089 |  | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3090 | static ssize_t | 
 | 3091 | tracing_entries_read(struct file *filp, char __user *ubuf, | 
 | 3092 | 		     size_t cnt, loff_t *ppos) | 
 | 3093 | { | 
 | 3094 | 	struct trace_array *tr = filp->private_data; | 
| Steven Rostedt | db526ca | 2009-03-12 13:53:25 -0400 | [diff] [blame] | 3095 | 	char buf[96]; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3096 | 	int r; | 
 | 3097 |  | 
| Steven Rostedt | db526ca | 2009-03-12 13:53:25 -0400 | [diff] [blame] | 3098 | 	mutex_lock(&trace_types_lock); | 
 | 3099 | 	if (!ring_buffer_expanded) | 
 | 3100 | 		r = sprintf(buf, "%lu (expanded: %lu)\n", | 
 | 3101 | 			    tr->entries >> 10, | 
 | 3102 | 			    trace_buf_size >> 10); | 
 | 3103 | 	else | 
 | 3104 | 		r = sprintf(buf, "%lu\n", tr->entries >> 10); | 
 | 3105 | 	mutex_unlock(&trace_types_lock); | 
 | 3106 |  | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3107 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
 | 3108 | } | 
 | 3109 |  | 
 | 3110 | static ssize_t | 
 | 3111 | tracing_entries_write(struct file *filp, const char __user *ubuf, | 
 | 3112 | 		      size_t cnt, loff_t *ppos) | 
 | 3113 | { | 
 | 3114 | 	unsigned long val; | 
 | 3115 | 	char buf[64]; | 
| Steven Rostedt | bf5e651 | 2008-11-10 21:46:00 -0500 | [diff] [blame] | 3116 | 	int ret, cpu; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3117 |  | 
| Steven Rostedt | cffae43 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3118 | 	if (cnt >= sizeof(buf)) | 
 | 3119 | 		return -EINVAL; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3120 |  | 
 | 3121 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 3122 | 		return -EFAULT; | 
 | 3123 |  | 
 | 3124 | 	buf[cnt] = 0; | 
 | 3125 |  | 
| Steven Rostedt | c6caeeb | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 3126 | 	ret = strict_strtoul(buf, 10, &val); | 
 | 3127 | 	if (ret < 0) | 
 | 3128 | 		return ret; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3129 |  | 
 | 3130 | 	/* must have at least 1 entry */ | 
 | 3131 | 	if (!val) | 
 | 3132 | 		return -EINVAL; | 
 | 3133 |  | 
 | 3134 | 	mutex_lock(&trace_types_lock); | 
 | 3135 |  | 
| Steven Rostedt | c76f069 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3136 | 	tracing_stop(); | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3137 |  | 
| Steven Rostedt | bf5e651 | 2008-11-10 21:46:00 -0500 | [diff] [blame] | 3138 | 	/* disable all cpu buffers */ | 
 | 3139 | 	for_each_tracing_cpu(cpu) { | 
 | 3140 | 		if (global_trace.data[cpu]) | 
 | 3141 | 			atomic_inc(&global_trace.data[cpu]->disabled); | 
 | 3142 | 		if (max_tr.data[cpu]) | 
 | 3143 | 			atomic_inc(&max_tr.data[cpu]->disabled); | 
 | 3144 | 	} | 
 | 3145 |  | 
| Steven Rostedt | 1696b2b | 2008-11-13 00:09:35 -0500 | [diff] [blame] | 3146 | 	/* value is in KB */ | 
 | 3147 | 	val <<= 10; | 
 | 3148 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 3149 | 	if (val != global_trace.entries) { | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 3150 | 		ret = tracing_resize_ring_buffer(val); | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 3151 | 		if (ret < 0) { | 
 | 3152 | 			cnt = ret; | 
| Steven Rostedt | 3eefae9 | 2008-05-12 21:21:04 +0200 | [diff] [blame] | 3153 | 			goto out; | 
 | 3154 | 		} | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3155 | 	} | 
 | 3156 |  | 
 | 3157 | 	filp->f_pos += cnt; | 
 | 3158 |  | 
| Steven Rostedt | 19384c0 | 2008-05-22 00:22:16 -0400 | [diff] [blame] | 3159 | 	/* If check pages failed, return ENOMEM */ | 
 | 3160 | 	if (tracing_disabled) | 
 | 3161 | 		cnt = -ENOMEM; | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3162 |  out: | 
| Steven Rostedt | bf5e651 | 2008-11-10 21:46:00 -0500 | [diff] [blame] | 3163 | 	for_each_tracing_cpu(cpu) { | 
 | 3164 | 		if (global_trace.data[cpu]) | 
 | 3165 | 			atomic_dec(&global_trace.data[cpu]->disabled); | 
 | 3166 | 		if (max_tr.data[cpu]) | 
 | 3167 | 			atomic_dec(&max_tr.data[cpu]->disabled); | 
 | 3168 | 	} | 
 | 3169 |  | 
| Steven Rostedt | c76f069 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3170 | 	tracing_start(); | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3171 | 	max_tr.entries = global_trace.entries; | 
 | 3172 | 	mutex_unlock(&trace_types_lock); | 
 | 3173 |  | 
 | 3174 | 	return cnt; | 
 | 3175 | } | 
 | 3176 |  | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3177 | static int mark_printk(const char *fmt, ...) | 
 | 3178 | { | 
 | 3179 | 	int ret; | 
 | 3180 | 	va_list args; | 
 | 3181 | 	va_start(args, fmt); | 
| Steven Rostedt | 40ce74f | 2009-03-19 14:03:53 -0400 | [diff] [blame] | 3182 | 	ret = trace_vprintk(0, fmt, args); | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3183 | 	va_end(args); | 
 | 3184 | 	return ret; | 
 | 3185 | } | 
 | 3186 |  | 
 | 3187 | static ssize_t | 
 | 3188 | tracing_mark_write(struct file *filp, const char __user *ubuf, | 
 | 3189 | 					size_t cnt, loff_t *fpos) | 
 | 3190 | { | 
 | 3191 | 	char *buf; | 
 | 3192 | 	char *end; | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3193 |  | 
| Steven Rostedt | c76f069 | 2008-11-07 22:36:02 -0500 | [diff] [blame] | 3194 | 	if (tracing_disabled) | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3195 | 		return -EINVAL; | 
 | 3196 |  | 
 | 3197 | 	if (cnt > TRACE_BUF_SIZE) | 
 | 3198 | 		cnt = TRACE_BUF_SIZE; | 
 | 3199 |  | 
 | 3200 | 	buf = kmalloc(cnt + 1, GFP_KERNEL); | 
 | 3201 | 	if (buf == NULL) | 
 | 3202 | 		return -ENOMEM; | 
 | 3203 |  | 
 | 3204 | 	if (copy_from_user(buf, ubuf, cnt)) { | 
 | 3205 | 		kfree(buf); | 
 | 3206 | 		return -EFAULT; | 
 | 3207 | 	} | 
 | 3208 |  | 
 | 3209 | 	/* Cut from the first nil or newline. */ | 
 | 3210 | 	buf[cnt] = '\0'; | 
 | 3211 | 	end = strchr(buf, '\n'); | 
 | 3212 | 	if (end) | 
 | 3213 | 		*end = '\0'; | 
 | 3214 |  | 
 | 3215 | 	cnt = mark_printk("%s\n", buf); | 
 | 3216 | 	kfree(buf); | 
 | 3217 | 	*fpos += cnt; | 
 | 3218 |  | 
 | 3219 | 	return cnt; | 
 | 3220 | } | 
 | 3221 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3222 | static const struct file_operations tracing_max_lat_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3223 | 	.open		= tracing_open_generic, | 
 | 3224 | 	.read		= tracing_max_lat_read, | 
 | 3225 | 	.write		= tracing_max_lat_write, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3226 | }; | 
 | 3227 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3228 | static const struct file_operations tracing_ctrl_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3229 | 	.open		= tracing_open_generic, | 
 | 3230 | 	.read		= tracing_ctrl_read, | 
 | 3231 | 	.write		= tracing_ctrl_write, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3232 | }; | 
 | 3233 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3234 | static const struct file_operations set_tracer_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3235 | 	.open		= tracing_open_generic, | 
 | 3236 | 	.read		= tracing_set_trace_read, | 
 | 3237 | 	.write		= tracing_set_trace_write, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3238 | }; | 
 | 3239 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3240 | static const struct file_operations tracing_pipe_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3241 | 	.open		= tracing_open_pipe, | 
| Soeren Sandmann Pedersen | 2a2cc8f | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 3242 | 	.poll		= tracing_poll_pipe, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3243 | 	.read		= tracing_read_pipe, | 
| Eduard - Gabriel Munteanu | 3c56819 | 2009-02-09 08:15:56 +0200 | [diff] [blame] | 3244 | 	.splice_read	= tracing_splice_read_pipe, | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3245 | 	.release	= tracing_release_pipe, | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3246 | }; | 
 | 3247 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3248 | static const struct file_operations tracing_entries_fops = { | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3249 | 	.open		= tracing_open_generic, | 
 | 3250 | 	.read		= tracing_entries_read, | 
 | 3251 | 	.write		= tracing_entries_write, | 
 | 3252 | }; | 
 | 3253 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3254 | static const struct file_operations tracing_mark_fops = { | 
| Frédéric Weisbecker | 43a1538 | 2008-09-21 20:16:30 +0200 | [diff] [blame] | 3255 | 	.open		= tracing_open_generic, | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3256 | 	.write		= tracing_mark_write, | 
 | 3257 | }; | 
 | 3258 |  | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3259 | struct ftrace_buffer_info { | 
 | 3260 | 	struct trace_array	*tr; | 
 | 3261 | 	void			*spare; | 
 | 3262 | 	int			cpu; | 
 | 3263 | 	unsigned int		read; | 
 | 3264 | }; | 
 | 3265 |  | 
 | 3266 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | 
 | 3267 | { | 
 | 3268 | 	int cpu = (int)(long)inode->i_private; | 
 | 3269 | 	struct ftrace_buffer_info *info; | 
 | 3270 |  | 
 | 3271 | 	if (tracing_disabled) | 
 | 3272 | 		return -ENODEV; | 
 | 3273 |  | 
 | 3274 | 	info = kzalloc(sizeof(*info), GFP_KERNEL); | 
 | 3275 | 	if (!info) | 
 | 3276 | 		return -ENOMEM; | 
 | 3277 |  | 
 | 3278 | 	info->tr	= &global_trace; | 
 | 3279 | 	info->cpu	= cpu; | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 3280 | 	info->spare	= NULL; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3281 | 	/* Force reading ring buffer for first read */ | 
 | 3282 | 	info->read	= (unsigned int)-1; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3283 |  | 
 | 3284 | 	filp->private_data = info; | 
 | 3285 |  | 
| Lai Jiangshan | d1e7e02 | 2009-04-02 15:16:56 +0800 | [diff] [blame] | 3286 | 	return nonseekable_open(inode, filp); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3287 | } | 
 | 3288 |  | 
/*
 * Read handler for "trace_pipe_raw": copy raw ring buffer pages to
 * userspace.  A single "spare" page is kept in the open-file state;
 * leftover bytes from a previous partial read are served first, and a new
 * page is pulled from the ring buffer only once the spare is exhausted.
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	unsigned int pos;
	ssize_t ret;
	size_t size;

	if (!count)
		return 0;

	/* lazily allocate the spare page on first use */
	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	info->read = 0;

	/* refill the spare page from the cpu's ring buffer */
	ret = ring_buffer_read_page(info->tr->buffer,
				    &info->spare,
				    count,
				    info->cpu, 0);
	if (ret < 0)
		return 0;

	pos = ring_buffer_page_len(info->spare);

	/* zero the unused tail so no stale kernel data leaks to userspace */
	if (pos < PAGE_SIZE)
		memset(info->spare + pos, 0, PAGE_SIZE - pos);

read:
	/* hand out at most the remainder of the spare page */
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;
	/* partial copy: report only the bytes actually transferred */
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
 | 3339 |  | 
 | 3340 | static int tracing_buffers_release(struct inode *inode, struct file *file) | 
 | 3341 | { | 
 | 3342 | 	struct ftrace_buffer_info *info = file->private_data; | 
 | 3343 |  | 
| Lai Jiangshan | ddd538f | 2009-04-02 15:16:59 +0800 | [diff] [blame] | 3344 | 	if (info->spare) | 
 | 3345 | 		ring_buffer_free_read_page(info->tr->buffer, info->spare); | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3346 | 	kfree(info); | 
 | 3347 |  | 
 | 3348 | 	return 0; | 
 | 3349 | } | 
 | 3350 |  | 
/* Refcounted handle on one ring buffer page handed to a pipe. */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* buffer the page was taken from */
	void			*page;		/* returned via ring_buffer_free_read_page() */
	int			ref;		/* page and ref freed when this drops to zero */
};
 | 3356 |  | 
 | 3357 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | 
 | 3358 | 				    struct pipe_buffer *buf) | 
 | 3359 | { | 
 | 3360 | 	struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 
 | 3361 |  | 
 | 3362 | 	if (--ref->ref) | 
 | 3363 | 		return; | 
 | 3364 |  | 
 | 3365 | 	ring_buffer_free_read_page(ref->buffer, ref->page); | 
 | 3366 | 	kfree(ref); | 
 | 3367 | 	buf->private = 0; | 
 | 3368 | } | 
 | 3369 |  | 
static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
				 struct pipe_buffer *buf)
{
	/*
	 * Non-zero refuses the steal: the page still belongs to the ring
	 * buffer and must go back via ring_buffer_free_read_page().
	 */
	return 1;
}
 | 3375 |  | 
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	/* another pipe buffer now shares this ring buffer page */
	ref->ref++;
}
 | 3383 |  | 
/* Pipe buffer operations for a buffer. */
static struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,	/* each ring buffer page is handed out whole */
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,	/* drops a buffer_ref */
	.steal			= buffer_pipe_buf_steal,	/* always refuses */
	.get			= buffer_pipe_buf_get,		/* takes a buffer_ref */
};
 | 3394 |  | 
 | 3395 | /* | 
 | 3396 |  * Callback from splice_to_pipe(), if we need to release some pages | 
 | 3397 |  * at the end of the spd in case we error'ed out in filling the pipe. | 
 | 3398 |  */ | 
 | 3399 | static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | 
 | 3400 | { | 
 | 3401 | 	struct buffer_ref *ref = | 
 | 3402 | 		(struct buffer_ref *)spd->partial[i].private; | 
 | 3403 |  | 
 | 3404 | 	if (--ref->ref) | 
 | 3405 | 		return; | 
 | 3406 |  | 
 | 3407 | 	ring_buffer_free_read_page(ref->buffer, ref->page); | 
 | 3408 | 	kfree(ref); | 
 | 3409 | 	spd->partial[i].private = 0; | 
 | 3410 | } | 
 | 3411 |  | 
 | 3412 | static ssize_t | 
 | 3413 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, | 
 | 3414 | 			    struct pipe_inode_info *pipe, size_t len, | 
 | 3415 | 			    unsigned int flags) | 
 | 3416 | { | 
 | 3417 | 	struct ftrace_buffer_info *info = file->private_data; | 
 | 3418 | 	struct partial_page partial[PIPE_BUFFERS]; | 
 | 3419 | 	struct page *pages[PIPE_BUFFERS]; | 
 | 3420 | 	struct splice_pipe_desc spd = { | 
 | 3421 | 		.pages		= pages, | 
 | 3422 | 		.partial	= partial, | 
 | 3423 | 		.flags		= flags, | 
 | 3424 | 		.ops		= &buffer_pipe_buf_ops, | 
 | 3425 | 		.spd_release	= buffer_spd_release, | 
 | 3426 | 	}; | 
 | 3427 | 	struct buffer_ref *ref; | 
 | 3428 | 	int size, i; | 
 | 3429 | 	size_t ret; | 
 | 3430 |  | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 3431 | 	if (*ppos & (PAGE_SIZE - 1)) { | 
 | 3432 | 		WARN_ONCE(1, "Ftrace: previous read must page-align\n"); | 
 | 3433 | 		return -EINVAL; | 
 | 3434 | 	} | 
 | 3435 |  | 
 | 3436 | 	if (len & (PAGE_SIZE - 1)) { | 
 | 3437 | 		WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); | 
 | 3438 | 		if (len < PAGE_SIZE) | 
 | 3439 | 			return -EINVAL; | 
 | 3440 | 		len &= PAGE_MASK; | 
 | 3441 | 	} | 
 | 3442 |  | 
 | 3443 | 	for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) { | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3444 | 		struct page *page; | 
 | 3445 | 		int r; | 
 | 3446 |  | 
 | 3447 | 		ref = kzalloc(sizeof(*ref), GFP_KERNEL); | 
 | 3448 | 		if (!ref) | 
 | 3449 | 			break; | 
 | 3450 |  | 
| Steven Rostedt | 7267fa6 | 2009-04-29 00:16:21 -0400 | [diff] [blame] | 3451 | 		ref->ref = 1; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3452 | 		ref->buffer = info->tr->buffer; | 
 | 3453 | 		ref->page = ring_buffer_alloc_read_page(ref->buffer); | 
 | 3454 | 		if (!ref->page) { | 
 | 3455 | 			kfree(ref); | 
 | 3456 | 			break; | 
 | 3457 | 		} | 
 | 3458 |  | 
 | 3459 | 		r = ring_buffer_read_page(ref->buffer, &ref->page, | 
 | 3460 | 					  len, info->cpu, 0); | 
 | 3461 | 		if (r < 0) { | 
 | 3462 | 			ring_buffer_free_read_page(ref->buffer, | 
 | 3463 | 						   ref->page); | 
 | 3464 | 			kfree(ref); | 
 | 3465 | 			break; | 
 | 3466 | 		} | 
 | 3467 |  | 
 | 3468 | 		/* | 
 | 3469 | 		 * zero out any left over data, this is going to | 
 | 3470 | 		 * user land. | 
 | 3471 | 		 */ | 
 | 3472 | 		size = ring_buffer_page_len(ref->page); | 
 | 3473 | 		if (size < PAGE_SIZE) | 
 | 3474 | 			memset(ref->page + size, 0, PAGE_SIZE - size); | 
 | 3475 |  | 
 | 3476 | 		page = virt_to_page(ref->page); | 
 | 3477 |  | 
 | 3478 | 		spd.pages[i] = page; | 
 | 3479 | 		spd.partial[i].len = PAGE_SIZE; | 
 | 3480 | 		spd.partial[i].offset = 0; | 
 | 3481 | 		spd.partial[i].private = (unsigned long)ref; | 
 | 3482 | 		spd.nr_pages++; | 
| Lai Jiangshan | 93cfb3c | 2009-04-02 15:17:08 +0800 | [diff] [blame] | 3483 | 		*ppos += PAGE_SIZE; | 
| Steven Rostedt | 2cadf91 | 2008-12-01 22:20:19 -0500 | [diff] [blame] | 3484 | 	} | 
 | 3485 |  | 
 | 3486 | 	spd.nr_pages = i; | 
 | 3487 |  | 
 | 3488 | 	/* did we read anything? */ | 
 | 3489 | 	if (!spd.nr_pages) { | 
 | 3490 | 		if (flags & SPLICE_F_NONBLOCK) | 
 | 3491 | 			ret = -EAGAIN; | 
 | 3492 | 		else | 
 | 3493 | 			ret = 0; | 
 | 3494 | 		/* TODO: block */ | 
 | 3495 | 		return ret; | 
 | 3496 | 	} | 
 | 3497 |  | 
 | 3498 | 	ret = splice_to_pipe(pipe, &spd); | 
 | 3499 |  | 
 | 3500 | 	return ret; | 
 | 3501 | } | 
 | 3502 |  | 
/* debugfs per-cpu "trace_pipe_raw": raw ring buffer pages, not seekable */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
 | 3510 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3511 | #ifdef CONFIG_DYNAMIC_FTRACE | 
 | 3512 |  | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3513 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3514 | { | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3515 | 	return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3516 | } | 
 | 3517 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3518 | static ssize_t | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3519 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3520 | 		  size_t cnt, loff_t *ppos) | 
 | 3521 | { | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 3522 | 	static char ftrace_dyn_info_buffer[1024]; | 
 | 3523 | 	static DEFINE_MUTEX(dyn_info_mutex); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3524 | 	unsigned long *p = filp->private_data; | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3525 | 	char *buf = ftrace_dyn_info_buffer; | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 3526 | 	int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3527 | 	int r; | 
 | 3528 |  | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3529 | 	mutex_lock(&dyn_info_mutex); | 
 | 3530 | 	r = sprintf(buf, "%ld ", *p); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3531 |  | 
| Steven Rostedt | a26a2a2 | 2008-10-31 00:03:22 -0400 | [diff] [blame] | 3532 | 	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3533 | 	buf[r++] = '\n'; | 
 | 3534 |  | 
 | 3535 | 	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
 | 3536 |  | 
 | 3537 | 	mutex_unlock(&dyn_info_mutex); | 
 | 3538 |  | 
 | 3539 | 	return r; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3540 | } | 
 | 3541 |  | 
| Steven Rostedt | 5e2336a0 | 2009-03-05 21:44:55 -0500 | [diff] [blame] | 3542 | static const struct file_operations tracing_dyn_info_fops = { | 
| Ingo Molnar | 4bf39a9 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3543 | 	.open		= tracing_open_generic, | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3544 | 	.read		= tracing_read_dyn_info, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3545 | }; | 
 | 3546 | #endif | 
 | 3547 |  | 
static struct dentry *d_tracer;	/* cached root of the "tracing" debugfs dir */

/*
 * Return the root "tracing" debugfs directory, creating it on first use.
 * Returns NULL if debugfs is not yet up or the directory cannot be made;
 * the failure warning is printed only once.
 */
struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	if (!debugfs_initialized())
		return NULL;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}
 | 3570 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3571 | static struct dentry *d_percpu; | 
 | 3572 |  | 
 | 3573 | struct dentry *tracing_dentry_percpu(void) | 
 | 3574 | { | 
 | 3575 | 	static int once; | 
 | 3576 | 	struct dentry *d_tracer; | 
 | 3577 |  | 
 | 3578 | 	if (d_percpu) | 
 | 3579 | 		return d_percpu; | 
 | 3580 |  | 
 | 3581 | 	d_tracer = tracing_init_dentry(); | 
 | 3582 |  | 
 | 3583 | 	if (!d_tracer) | 
 | 3584 | 		return NULL; | 
 | 3585 |  | 
 | 3586 | 	d_percpu = debugfs_create_dir("per_cpu", d_tracer); | 
 | 3587 |  | 
 | 3588 | 	if (!d_percpu && !once) { | 
 | 3589 | 		once = 1; | 
 | 3590 | 		pr_warning("Could not create debugfs directory 'per_cpu'\n"); | 
 | 3591 | 		return NULL; | 
 | 3592 | 	} | 
 | 3593 |  | 
 | 3594 | 	return d_percpu; | 
 | 3595 | } | 
 | 3596 |  | 
/*
 * Create the per-cpu debugfs entries (trace_pipe, trace, trace_pipe_raw)
 * under per_cpu/cpuN.  Each file's private data carries the cpu number.
 * Failures only warn; missing entries are not fatal.
 */
static void tracing_init_debugfs_percpu(long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu();
	struct dentry *entry, *d_cpu;
	/* "cpu" (3 chars) + up to 3 digits (cpu <= 999) + '\0' */
	char cpu_dir[7];

	if (cpu > 999 || cpu < 0)
		return;

	sprintf(cpu_dir, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
				(void *) cpu, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_pipe' entry\n");

	/* per cpu trace */
	entry = debugfs_create_file("trace", 0644, d_cpu,
				(void *) cpu, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	/* per cpu raw ring buffer pages */
	entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
				    (void *) cpu, &tracing_buffers_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
}
 | 3631 |  | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 3632 | #ifdef CONFIG_FTRACE_SELFTEST | 
 | 3633 | /* Let selftest have access to static functions in this file */ | 
 | 3634 | #include "trace_selftest.c" | 
 | 3635 | #endif | 
 | 3636 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3637 | struct trace_option_dentry { | 
 | 3638 | 	struct tracer_opt		*opt; | 
 | 3639 | 	struct tracer_flags		*flags; | 
 | 3640 | 	struct dentry			*entry; | 
 | 3641 | }; | 
 | 3642 |  | 
 | 3643 | static ssize_t | 
 | 3644 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, | 
 | 3645 | 			loff_t *ppos) | 
 | 3646 | { | 
 | 3647 | 	struct trace_option_dentry *topt = filp->private_data; | 
 | 3648 | 	char *buf; | 
 | 3649 |  | 
 | 3650 | 	if (topt->flags->val & topt->opt->bit) | 
 | 3651 | 		buf = "1\n"; | 
 | 3652 | 	else | 
 | 3653 | 		buf = "0\n"; | 
 | 3654 |  | 
 | 3655 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 
 | 3656 | } | 
 | 3657 |  | 
 | 3658 | static ssize_t | 
 | 3659 | trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | 
 | 3660 | 			 loff_t *ppos) | 
 | 3661 | { | 
 | 3662 | 	struct trace_option_dentry *topt = filp->private_data; | 
 | 3663 | 	unsigned long val; | 
 | 3664 | 	char buf[64]; | 
 | 3665 | 	int ret; | 
 | 3666 |  | 
 | 3667 | 	if (cnt >= sizeof(buf)) | 
 | 3668 | 		return -EINVAL; | 
 | 3669 |  | 
 | 3670 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 3671 | 		return -EFAULT; | 
 | 3672 |  | 
 | 3673 | 	buf[cnt] = 0; | 
 | 3674 |  | 
 | 3675 | 	ret = strict_strtoul(buf, 10, &val); | 
 | 3676 | 	if (ret < 0) | 
 | 3677 | 		return ret; | 
 | 3678 |  | 
 | 3679 | 	ret = 0; | 
 | 3680 | 	switch (val) { | 
 | 3681 | 	case 0: | 
 | 3682 | 		/* do nothing if already cleared */ | 
 | 3683 | 		if (!(topt->flags->val & topt->opt->bit)) | 
 | 3684 | 			break; | 
 | 3685 |  | 
 | 3686 | 		mutex_lock(&trace_types_lock); | 
 | 3687 | 		if (current_trace->set_flag) | 
 | 3688 | 			ret = current_trace->set_flag(topt->flags->val, | 
 | 3689 | 						      topt->opt->bit, 0); | 
 | 3690 | 		mutex_unlock(&trace_types_lock); | 
 | 3691 | 		if (ret) | 
 | 3692 | 			return ret; | 
 | 3693 | 		topt->flags->val &= ~topt->opt->bit; | 
 | 3694 | 		break; | 
 | 3695 | 	case 1: | 
 | 3696 | 		/* do nothing if already set */ | 
 | 3697 | 		if (topt->flags->val & topt->opt->bit) | 
 | 3698 | 			break; | 
 | 3699 |  | 
 | 3700 | 		mutex_lock(&trace_types_lock); | 
 | 3701 | 		if (current_trace->set_flag) | 
 | 3702 | 			ret = current_trace->set_flag(topt->flags->val, | 
 | 3703 | 						      topt->opt->bit, 1); | 
 | 3704 | 		mutex_unlock(&trace_types_lock); | 
 | 3705 | 		if (ret) | 
 | 3706 | 			return ret; | 
 | 3707 | 		topt->flags->val |= topt->opt->bit; | 
 | 3708 | 		break; | 
 | 3709 |  | 
 | 3710 | 	default: | 
 | 3711 | 		return -EINVAL; | 
 | 3712 | 	} | 
 | 3713 |  | 
 | 3714 | 	*ppos += cnt; | 
 | 3715 |  | 
 | 3716 | 	return cnt; | 
 | 3717 | } | 
 | 3718 |  | 
 | 3719 |  | 
/* debugfs "options/<opt>" files for tracer-specific option bits */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
};
 | 3725 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 3726 | static ssize_t | 
 | 3727 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, | 
 | 3728 | 			loff_t *ppos) | 
 | 3729 | { | 
 | 3730 | 	long index = (long)filp->private_data; | 
 | 3731 | 	char *buf; | 
 | 3732 |  | 
 | 3733 | 	if (trace_flags & (1 << index)) | 
 | 3734 | 		buf = "1\n"; | 
 | 3735 | 	else | 
 | 3736 | 		buf = "0\n"; | 
 | 3737 |  | 
 | 3738 | 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | 
 | 3739 | } | 
 | 3740 |  | 
 | 3741 | static ssize_t | 
 | 3742 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | 
 | 3743 | 			 loff_t *ppos) | 
 | 3744 | { | 
 | 3745 | 	long index = (long)filp->private_data; | 
 | 3746 | 	char buf[64]; | 
 | 3747 | 	unsigned long val; | 
 | 3748 | 	int ret; | 
 | 3749 |  | 
 | 3750 | 	if (cnt >= sizeof(buf)) | 
 | 3751 | 		return -EINVAL; | 
 | 3752 |  | 
 | 3753 | 	if (copy_from_user(&buf, ubuf, cnt)) | 
 | 3754 | 		return -EFAULT; | 
 | 3755 |  | 
 | 3756 | 	buf[cnt] = 0; | 
 | 3757 |  | 
 | 3758 | 	ret = strict_strtoul(buf, 10, &val); | 
 | 3759 | 	if (ret < 0) | 
 | 3760 | 		return ret; | 
 | 3761 |  | 
 | 3762 | 	switch (val) { | 
 | 3763 | 	case 0: | 
 | 3764 | 		trace_flags &= ~(1 << index); | 
 | 3765 | 		break; | 
 | 3766 | 	case 1: | 
 | 3767 | 		trace_flags |= 1 << index; | 
 | 3768 | 		break; | 
 | 3769 |  | 
 | 3770 | 	default: | 
 | 3771 | 		return -EINVAL; | 
 | 3772 | 	} | 
 | 3773 |  | 
 | 3774 | 	*ppos += cnt; | 
 | 3775 |  | 
 | 3776 | 	return cnt; | 
 | 3777 | } | 
 | 3778 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 3779 | static const struct file_operations trace_options_core_fops = { | 
 | 3780 | 	.open = tracing_open_generic, | 
 | 3781 | 	.read = trace_options_core_read, | 
 | 3782 | 	.write = trace_options_core_write, | 
 | 3783 | }; | 
 | 3784 |  | 
 | 3785 | static struct dentry *trace_options_init_dentry(void) | 
 | 3786 | { | 
 | 3787 | 	struct dentry *d_tracer; | 
 | 3788 | 	static struct dentry *t_options; | 
 | 3789 |  | 
 | 3790 | 	if (t_options) | 
 | 3791 | 		return t_options; | 
 | 3792 |  | 
 | 3793 | 	d_tracer = tracing_init_dentry(); | 
 | 3794 | 	if (!d_tracer) | 
 | 3795 | 		return NULL; | 
 | 3796 |  | 
 | 3797 | 	t_options = debugfs_create_dir("options", d_tracer); | 
 | 3798 | 	if (!t_options) { | 
 | 3799 | 		pr_warning("Could not create debugfs directory 'options'\n"); | 
 | 3800 | 		return NULL; | 
 | 3801 | 	} | 
 | 3802 |  | 
 | 3803 | 	return t_options; | 
 | 3804 | } | 
 | 3805 |  | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3806 | static void | 
 | 3807 | create_trace_option_file(struct trace_option_dentry *topt, | 
 | 3808 | 			 struct tracer_flags *flags, | 
 | 3809 | 			 struct tracer_opt *opt) | 
 | 3810 | { | 
 | 3811 | 	struct dentry *t_options; | 
 | 3812 | 	struct dentry *entry; | 
 | 3813 |  | 
 | 3814 | 	t_options = trace_options_init_dentry(); | 
 | 3815 | 	if (!t_options) | 
 | 3816 | 		return; | 
 | 3817 |  | 
 | 3818 | 	topt->flags = flags; | 
 | 3819 | 	topt->opt = opt; | 
 | 3820 |  | 
 | 3821 | 	entry = debugfs_create_file(opt->name, 0644, t_options, topt, | 
 | 3822 | 				    &trace_options_fops); | 
 | 3823 |  | 
 | 3824 | 	topt->entry = entry; | 
 | 3825 |  | 
 | 3826 | } | 
 | 3827 |  | 
 | 3828 | static struct trace_option_dentry * | 
 | 3829 | create_trace_option_files(struct tracer *tracer) | 
 | 3830 | { | 
 | 3831 | 	struct trace_option_dentry *topts; | 
 | 3832 | 	struct tracer_flags *flags; | 
 | 3833 | 	struct tracer_opt *opts; | 
 | 3834 | 	int cnt; | 
 | 3835 |  | 
 | 3836 | 	if (!tracer) | 
 | 3837 | 		return NULL; | 
 | 3838 |  | 
 | 3839 | 	flags = tracer->flags; | 
 | 3840 |  | 
 | 3841 | 	if (!flags || !flags->opts) | 
 | 3842 | 		return NULL; | 
 | 3843 |  | 
 | 3844 | 	opts = flags->opts; | 
 | 3845 |  | 
 | 3846 | 	for (cnt = 0; opts[cnt].name; cnt++) | 
 | 3847 | 		; | 
 | 3848 |  | 
| Steven Rostedt | 0cfe824 | 2009-02-27 10:51:10 -0500 | [diff] [blame] | 3849 | 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); | 
| Steven Rostedt | 577b785 | 2009-02-26 23:43:05 -0500 | [diff] [blame] | 3850 | 	if (!topts) | 
 | 3851 | 		return NULL; | 
 | 3852 |  | 
 | 3853 | 	for (cnt = 0; opts[cnt].name; cnt++) | 
 | 3854 | 		create_trace_option_file(&topts[cnt], flags, | 
 | 3855 | 					 &opts[cnt]); | 
 | 3856 |  | 
 | 3857 | 	return topts; | 
 | 3858 | } | 
 | 3859 |  | 
 | 3860 | static void | 
 | 3861 | destroy_trace_option_files(struct trace_option_dentry *topts) | 
 | 3862 | { | 
 | 3863 | 	int cnt; | 
 | 3864 |  | 
 | 3865 | 	if (!topts) | 
 | 3866 | 		return; | 
 | 3867 |  | 
 | 3868 | 	for (cnt = 0; topts[cnt].opt; cnt++) { | 
 | 3869 | 		if (topts[cnt].entry) | 
 | 3870 | 			debugfs_remove(topts[cnt].entry); | 
 | 3871 | 	} | 
 | 3872 |  | 
 | 3873 | 	kfree(topts); | 
 | 3874 | } | 
 | 3875 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 3876 | static struct dentry * | 
 | 3877 | create_trace_option_core_file(const char *option, long index) | 
 | 3878 | { | 
 | 3879 | 	struct dentry *t_options; | 
 | 3880 | 	struct dentry *entry; | 
 | 3881 |  | 
 | 3882 | 	t_options = trace_options_init_dentry(); | 
 | 3883 | 	if (!t_options) | 
 | 3884 | 		return NULL; | 
 | 3885 |  | 
 | 3886 | 	entry = debugfs_create_file(option, 0644, t_options, (void *)index, | 
 | 3887 | 				    &trace_options_core_fops); | 
 | 3888 |  | 
 | 3889 | 	return entry; | 
 | 3890 | } | 
 | 3891 |  | 
 | 3892 | static __init void create_trace_options_dir(void) | 
 | 3893 | { | 
 | 3894 | 	struct dentry *t_options; | 
 | 3895 | 	struct dentry *entry; | 
 | 3896 | 	int i; | 
 | 3897 |  | 
 | 3898 | 	t_options = trace_options_init_dentry(); | 
 | 3899 | 	if (!t_options) | 
 | 3900 | 		return; | 
 | 3901 |  | 
 | 3902 | 	for (i = 0; trace_options[i]; i++) { | 
 | 3903 | 		entry = create_trace_option_core_file(trace_options[i], i); | 
 | 3904 | 		if (!entry) | 
 | 3905 | 			pr_warning("Could not create debugfs %s entry\n", | 
 | 3906 | 				   trace_options[i]); | 
 | 3907 | 	} | 
 | 3908 | } | 
 | 3909 |  | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 3910 | static __init int tracer_init_debugfs(void) | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3911 | { | 
 | 3912 | 	struct dentry *d_tracer; | 
 | 3913 | 	struct dentry *entry; | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3914 | 	int cpu; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3915 |  | 
 | 3916 | 	d_tracer = tracing_init_dentry(); | 
 | 3917 |  | 
 | 3918 | 	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer, | 
 | 3919 | 				    &global_trace, &tracing_ctrl_fops); | 
 | 3920 | 	if (!entry) | 
 | 3921 | 		pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | 
 | 3922 |  | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 3923 | 	entry = debugfs_create_file("trace_options", 0644, d_tracer, | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3924 | 				    NULL, &tracing_iter_fops); | 
 | 3925 | 	if (!entry) | 
| Steven Rostedt | ee6bce5 | 2008-11-12 17:52:37 -0500 | [diff] [blame] | 3926 | 		pr_warning("Could not create debugfs 'trace_options' entry\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3927 |  | 
| Steven Rostedt | a825907 | 2009-02-26 22:19:12 -0500 | [diff] [blame] | 3928 | 	create_trace_options_dir(); | 
 | 3929 |  | 
| Ingo Molnar | c7078de | 2008-05-12 21:20:52 +0200 | [diff] [blame] | 3930 | 	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 
 | 3931 | 				    NULL, &tracing_cpumask_fops); | 
 | 3932 | 	if (!entry) | 
 | 3933 | 		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | 
 | 3934 |  | 
| Steven Rostedt | 4acd4d0 | 2009-03-18 10:40:24 -0400 | [diff] [blame] | 3935 | 	entry = debugfs_create_file("trace", 0644, d_tracer, | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3936 | 				 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3937 | 	if (!entry) | 
 | 3938 | 		pr_warning("Could not create debugfs 'trace' entry\n"); | 
 | 3939 |  | 
 | 3940 | 	entry = debugfs_create_file("available_tracers", 0444, d_tracer, | 
 | 3941 | 				    &global_trace, &show_traces_fops); | 
 | 3942 | 	if (!entry) | 
| Frédéric Weisbecker | 98a983a | 2008-08-15 21:08:22 +0200 | [diff] [blame] | 3943 | 		pr_warning("Could not create debugfs 'available_tracers' entry\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3944 |  | 
 | 3945 | 	entry = debugfs_create_file("current_tracer", 0444, d_tracer, | 
 | 3946 | 				    &global_trace, &set_tracer_fops); | 
 | 3947 | 	if (!entry) | 
| Frédéric Weisbecker | 98a983a | 2008-08-15 21:08:22 +0200 | [diff] [blame] | 3948 | 		pr_warning("Could not create debugfs 'current_tracer' entry\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3949 |  | 
 | 3950 | 	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, | 
 | 3951 | 				    &tracing_max_latency, | 
 | 3952 | 				    &tracing_max_lat_fops); | 
 | 3953 | 	if (!entry) | 
 | 3954 | 		pr_warning("Could not create debugfs " | 
 | 3955 | 			   "'tracing_max_latency' entry\n"); | 
 | 3956 |  | 
 | 3957 | 	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer, | 
 | 3958 | 				    &tracing_thresh, &tracing_max_lat_fops); | 
 | 3959 | 	if (!entry) | 
 | 3960 | 		pr_warning("Could not create debugfs " | 
| Frédéric Weisbecker | 98a983a | 2008-08-15 21:08:22 +0200 | [diff] [blame] | 3961 | 			   "'tracing_thresh' entry\n"); | 
| Ingo Molnar | 7bd2f24 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 3962 | 	entry = debugfs_create_file("README", 0644, d_tracer, | 
 | 3963 | 				    NULL, &tracing_readme_fops); | 
 | 3964 | 	if (!entry) | 
 | 3965 | 		pr_warning("Could not create debugfs 'README' entry\n"); | 
 | 3966 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3967 | 	entry = debugfs_create_file("trace_pipe", 0444, d_tracer, | 
 | 3968 | 			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 
| Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 3969 | 	if (!entry) | 
 | 3970 | 		pr_warning("Could not create debugfs " | 
| Frédéric Weisbecker | 98a983a | 2008-08-15 21:08:22 +0200 | [diff] [blame] | 3971 | 			   "'trace_pipe' entry\n"); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3972 |  | 
| Steven Rostedt | a94c80e | 2008-11-12 17:52:36 -0500 | [diff] [blame] | 3973 | 	entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3974 | 				    &global_trace, &tracing_entries_fops); | 
 | 3975 | 	if (!entry) | 
 | 3976 | 		pr_warning("Could not create debugfs " | 
| Steven Rostedt | a94c80e | 2008-11-12 17:52:36 -0500 | [diff] [blame] | 3977 | 			   "'buffer_size_kb' entry\n"); | 
| Steven Rostedt | a98a3c3 | 2008-05-12 21:20:59 +0200 | [diff] [blame] | 3978 |  | 
| Pekka Paalanen | 5bf9a1e | 2008-09-16 22:06:42 +0300 | [diff] [blame] | 3979 | 	entry = debugfs_create_file("trace_marker", 0220, d_tracer, | 
 | 3980 | 				    NULL, &tracing_mark_fops); | 
 | 3981 | 	if (!entry) | 
 | 3982 | 		pr_warning("Could not create debugfs " | 
 | 3983 | 			   "'trace_marker' entry\n"); | 
 | 3984 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3985 | #ifdef CONFIG_DYNAMIC_FTRACE | 
 | 3986 | 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 
 | 3987 | 				    &ftrace_update_tot_cnt, | 
| Steven Rostedt | b807c3d | 2008-10-30 16:08:33 -0400 | [diff] [blame] | 3988 | 				    &tracing_dyn_info_fops); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 3989 | 	if (!entry) | 
 | 3990 | 		pr_warning("Could not create debugfs " | 
 | 3991 | 			   "'dyn_ftrace_total_info' entry\n"); | 
 | 3992 | #endif | 
| Ingo Molnar | d618b3e | 2008-05-12 21:20:49 +0200 | [diff] [blame] | 3993 | #ifdef CONFIG_SYSPROF_TRACER | 
 | 3994 | 	init_tracer_sysprof_debugfs(d_tracer); | 
 | 3995 | #endif | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 3996 |  | 
 | 3997 | 	for_each_tracing_cpu(cpu) | 
 | 3998 | 		tracing_init_debugfs_percpu(cpu); | 
 | 3999 |  | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 4000 | 	return 0; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4001 | } | 
 | 4002 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4003 | static int trace_panic_handler(struct notifier_block *this, | 
 | 4004 | 			       unsigned long event, void *unused) | 
 | 4005 | { | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 4006 | 	if (ftrace_dump_on_oops) | 
 | 4007 | 		ftrace_dump(); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4008 | 	return NOTIFY_OK; | 
 | 4009 | } | 
 | 4010 |  | 
/* Registered on the panic notifier chain by tracer_alloc_buffers(). */
static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};
 | 4016 |  | 
 | 4017 | static int trace_die_handler(struct notifier_block *self, | 
 | 4018 | 			     unsigned long val, | 
 | 4019 | 			     void *data) | 
 | 4020 | { | 
 | 4021 | 	switch (val) { | 
 | 4022 | 	case DIE_OOPS: | 
| Steven Rostedt | 944ac42 | 2008-10-23 19:26:08 -0400 | [diff] [blame] | 4023 | 		if (ftrace_dump_on_oops) | 
 | 4024 | 			ftrace_dump(); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4025 | 		break; | 
 | 4026 | 	default: | 
 | 4027 | 		break; | 
 | 4028 | 	} | 
 | 4029 | 	return NOTIFY_OK; | 
 | 4030 | } | 
 | 4031 |  | 
/* Registered on the die notifier chain by tracer_alloc_buffers(). */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
 | 4036 |  | 
 | 4037 | /* | 
 | 4038 |  * printk is set to max of 1024, we really don't need it that big. | 
 | 4039 |  * Nothing should be printing 1000 characters anyway. | 
 | 4040 |  */ | 
 | 4041 | #define TRACE_MAX_PRINT		1000 | 
 | 4042 |  | 
 | 4043 | /* | 
 | 4044 |  * Define here KERN_TRACE so that we have one place to modify | 
 | 4045 |  * it if we decide to change what log level the ftrace dump | 
 | 4046 |  * should be at. | 
 | 4047 |  */ | 
| Steven Rostedt | 428aee1 | 2009-01-14 12:24:42 -0500 | [diff] [blame] | 4048 | #define KERN_TRACE		KERN_EMERG | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4049 |  | 
 | 4050 | static void | 
 | 4051 | trace_printk_seq(struct trace_seq *s) | 
 | 4052 | { | 
 | 4053 | 	/* Probably should print a warning here. */ | 
 | 4054 | 	if (s->len >= 1000) | 
 | 4055 | 		s->len = 1000; | 
 | 4056 |  | 
 | 4057 | 	/* should be zero ended, but we are paranoid. */ | 
 | 4058 | 	s->buffer[s->len] = 0; | 
 | 4059 |  | 
 | 4060 | 	printk(KERN_TRACE "%s", s->buffer); | 
 | 4061 |  | 
| Steven Rostedt | f952075 | 2009-03-02 14:04:40 -0500 | [diff] [blame] | 4062 | 	trace_seq_init(s); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4063 | } | 
 | 4064 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4065 | static void __ftrace_dump(bool disable_tracing) | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4066 | { | 
 | 4067 | 	static DEFINE_SPINLOCK(ftrace_dump_lock); | 
 | 4068 | 	/* use static because iter can be a bit big for the stack */ | 
 | 4069 | 	static struct trace_iterator iter; | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4070 | 	unsigned int old_userobj; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4071 | 	static int dump_ran; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 4072 | 	unsigned long flags; | 
 | 4073 | 	int cnt = 0, cpu; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4074 |  | 
 | 4075 | 	/* only one dump */ | 
 | 4076 | 	spin_lock_irqsave(&ftrace_dump_lock, flags); | 
 | 4077 | 	if (dump_ran) | 
 | 4078 | 		goto out; | 
 | 4079 |  | 
 | 4080 | 	dump_ran = 1; | 
 | 4081 |  | 
| Steven Rostedt | 0ee6b6c | 2009-01-14 14:50:19 -0500 | [diff] [blame] | 4082 | 	tracing_off(); | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4083 |  | 
 | 4084 | 	if (disable_tracing) | 
 | 4085 | 		ftrace_kill(); | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4086 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 4087 | 	for_each_tracing_cpu(cpu) { | 
 | 4088 | 		atomic_inc(&global_trace.data[cpu]->disabled); | 
 | 4089 | 	} | 
 | 4090 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4091 | 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; | 
 | 4092 |  | 
| Török Edwin | b54d3de | 2008-11-22 13:28:48 +0200 | [diff] [blame] | 4093 | 	/* don't look at user memory in panic mode */ | 
 | 4094 | 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 
 | 4095 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4096 | 	printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 
 | 4097 |  | 
| Steven Rostedt | e543ad7 | 2009-03-04 18:20:36 -0500 | [diff] [blame] | 4098 | 	/* Simulate the iterator */ | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4099 | 	iter.tr = &global_trace; | 
 | 4100 | 	iter.trace = current_trace; | 
| Steven Rostedt | e543ad7 | 2009-03-04 18:20:36 -0500 | [diff] [blame] | 4101 | 	iter.cpu_file = TRACE_PIPE_ALL_CPU; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4102 |  | 
 | 4103 | 	/* | 
 | 4104 | 	 * We need to stop all tracing on all CPUS to read the | 
 | 4105 | 	 * the next buffer. This is a bit expensive, but is | 
 | 4106 | 	 * not done often. We fill all what we can read, | 
 | 4107 | 	 * and then release the locks again. | 
 | 4108 | 	 */ | 
 | 4109 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4110 | 	while (!trace_empty(&iter)) { | 
 | 4111 |  | 
 | 4112 | 		if (!cnt) | 
 | 4113 | 			printk(KERN_TRACE "---------------------------------\n"); | 
 | 4114 |  | 
 | 4115 | 		cnt++; | 
 | 4116 |  | 
 | 4117 | 		/* reset all but tr, trace, and overruns */ | 
 | 4118 | 		memset(&iter.seq, 0, | 
 | 4119 | 		       sizeof(struct trace_iterator) - | 
 | 4120 | 		       offsetof(struct trace_iterator, seq)); | 
 | 4121 | 		iter.iter_flags |= TRACE_FILE_LAT_FMT; | 
 | 4122 | 		iter.pos = -1; | 
 | 4123 |  | 
 | 4124 | 		if (find_next_entry_inc(&iter) != NULL) { | 
 | 4125 | 			print_trace_line(&iter); | 
 | 4126 | 			trace_consume(&iter); | 
 | 4127 | 		} | 
 | 4128 |  | 
 | 4129 | 		trace_printk_seq(&iter.seq); | 
 | 4130 | 	} | 
 | 4131 |  | 
 | 4132 | 	if (!cnt) | 
 | 4133 | 		printk(KERN_TRACE "   (ftrace buffer empty)\n"); | 
 | 4134 | 	else | 
 | 4135 | 		printk(KERN_TRACE "---------------------------------\n"); | 
 | 4136 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4137 | 	/* Re-enable tracing if requested */ | 
 | 4138 | 	if (!disable_tracing) { | 
 | 4139 | 		trace_flags |= old_userobj; | 
 | 4140 |  | 
 | 4141 | 		for_each_tracing_cpu(cpu) { | 
 | 4142 | 			atomic_dec(&global_trace.data[cpu]->disabled); | 
 | 4143 | 		} | 
 | 4144 | 		tracing_on(); | 
 | 4145 | 	} | 
 | 4146 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4147 |  out: | 
 | 4148 | 	spin_unlock_irqrestore(&ftrace_dump_lock, flags); | 
 | 4149 | } | 
 | 4150 |  | 
| Frederic Weisbecker | cf586b6 | 2009-03-22 05:04:35 +0100 | [diff] [blame] | 4151 | /* By default: disable tracing after the dump */ | 
 | 4152 | void ftrace_dump(void) | 
 | 4153 | { | 
 | 4154 | 	__ftrace_dump(true); | 
 | 4155 | } | 
 | 4156 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4157 | __init static int tracer_alloc_buffers(void) | 
 | 4158 | { | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 4159 | 	struct trace_array_cpu *data; | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 4160 | 	int ring_buf_size; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4161 | 	int i; | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4162 | 	int ret = -ENOMEM; | 
 | 4163 |  | 
 | 4164 | 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) | 
 | 4165 | 		goto out; | 
 | 4166 |  | 
 | 4167 | 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 
 | 4168 | 		goto out_free_buffer_mask; | 
 | 4169 |  | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4170 | 	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | 
 | 4171 | 		goto out_free_tracing_cpumask; | 
 | 4172 |  | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 4173 | 	/* To save memory, keep the ring buffer size to its minimum */ | 
 | 4174 | 	if (ring_buffer_expanded) | 
 | 4175 | 		ring_buf_size = trace_buf_size; | 
 | 4176 | 	else | 
 | 4177 | 		ring_buf_size = 1; | 
 | 4178 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4179 | 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 
 | 4180 | 	cpumask_copy(tracing_cpumask, cpu_all_mask); | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4181 | 	cpumask_clear(tracing_reader_cpumask); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4182 |  | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 4183 | 	/* TODO: make the number of buffers hot pluggable with CPUS */ | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 4184 | 	global_trace.buffer = ring_buffer_alloc(ring_buf_size, | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4185 | 						   TRACE_BUFFER_FLAGS); | 
 | 4186 | 	if (!global_trace.buffer) { | 
 | 4187 | 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 
 | 4188 | 		WARN_ON(1); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4189 | 		goto out_free_cpumask; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4190 | 	} | 
 | 4191 | 	global_trace.entries = ring_buffer_size(global_trace.buffer); | 
 | 4192 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4193 |  | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4194 | #ifdef CONFIG_TRACER_MAX_TRACE | 
| Steven Rostedt | 73c5162 | 2009-03-11 13:42:01 -0400 | [diff] [blame] | 4195 | 	max_tr.buffer = ring_buffer_alloc(ring_buf_size, | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4196 | 					     TRACE_BUFFER_FLAGS); | 
 | 4197 | 	if (!max_tr.buffer) { | 
 | 4198 | 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 
 | 4199 | 		WARN_ON(1); | 
 | 4200 | 		ring_buffer_free(global_trace.buffer); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4201 | 		goto out_free_cpumask; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4202 | 	} | 
 | 4203 | 	max_tr.entries = ring_buffer_size(max_tr.buffer); | 
 | 4204 | 	WARN_ON(max_tr.entries != global_trace.entries); | 
 | 4205 | #endif | 
 | 4206 |  | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 4207 | 	/* Allocate the first page for all buffers */ | 
| Steven Rostedt | ab46428 | 2008-05-12 21:21:00 +0200 | [diff] [blame] | 4208 | 	for_each_tracing_cpu(i) { | 
| Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 4209 | 		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4210 | 		max_tr.data[i] = &per_cpu(max_data, i); | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4211 | 	} | 
 | 4212 |  | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4213 | 	trace_init_cmdlines(); | 
 | 4214 |  | 
| Frédéric Weisbecker | 43a1538 | 2008-09-21 20:16:30 +0200 | [diff] [blame] | 4215 | 	register_tracer(&nop_trace); | 
| Steven Rostedt | 79fb076 | 2009-02-02 21:38:33 -0500 | [diff] [blame] | 4216 | 	current_trace = &nop_trace; | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 4217 | #ifdef CONFIG_BOOT_TRACER | 
 | 4218 | 	register_tracer(&boot_tracer); | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 4219 | #endif | 
| Steven Rostedt | 60a1177 | 2008-05-12 21:20:44 +0200 | [diff] [blame] | 4220 | 	/* All seems OK, enable tracing */ | 
 | 4221 | 	tracing_disabled = 0; | 
| Steven Rostedt | 3928a8a | 2008-09-29 23:02:41 -0400 | [diff] [blame] | 4222 |  | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4223 | 	atomic_notifier_chain_register(&panic_notifier_list, | 
 | 4224 | 				       &trace_panic_notifier); | 
 | 4225 |  | 
 | 4226 | 	register_die_notifier(&trace_die_notifier); | 
| Frederic Weisbecker | 2fc1dfb | 2009-03-16 01:45:03 +0100 | [diff] [blame] | 4227 |  | 
 | 4228 | 	return 0; | 
| Steven Rostedt | 3f5a54e | 2008-07-30 22:36:46 -0400 | [diff] [blame] | 4229 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4230 | out_free_cpumask: | 
| Frederic Weisbecker | b04cc6b | 2009-02-25 03:22:28 +0100 | [diff] [blame] | 4231 | 	free_cpumask_var(tracing_reader_cpumask); | 
 | 4232 | out_free_tracing_cpumask: | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4233 | 	free_cpumask_var(tracing_cpumask); | 
 | 4234 | out_free_buffer_mask: | 
 | 4235 | 	free_cpumask_var(tracing_buffer_mask); | 
 | 4236 | out: | 
 | 4237 | 	return ret; | 
| Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 4238 | } | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 4239 |  | 
 | 4240 | __init static int clear_boot_tracer(void) | 
 | 4241 | { | 
 | 4242 | 	/* | 
 | 4243 | 	 * The default tracer at boot buffer is an init section. | 
 | 4244 | 	 * This function is called in lateinit. If we did not | 
 | 4245 | 	 * find the boot tracer, then clear it out, to prevent | 
 | 4246 | 	 * later registration from accessing the buffer that is | 
 | 4247 | 	 * about to be freed. | 
 | 4248 | 	 */ | 
 | 4249 | 	if (!default_bootup_tracer) | 
 | 4250 | 		return 0; | 
 | 4251 |  | 
 | 4252 | 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | 
 | 4253 | 	       default_bootup_tracer); | 
 | 4254 | 	default_bootup_tracer = NULL; | 
 | 4255 |  | 
 | 4256 | 	return 0; | 
 | 4257 | } | 
 | 4258 |  | 
| Frédéric Weisbecker | b5ad384 | 2008-09-23 11:34:32 +0100 | [diff] [blame] | 4259 | early_initcall(tracer_alloc_buffers); | 
 | 4260 | fs_initcall(tracer_init_debugfs); | 
| Steven Rostedt | b2821ae | 2009-02-02 21:38:32 -0500 | [diff] [blame] | 4261 | late_initcall(clear_boot_tracer); |