/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It defaults to off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

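/*
 * Illustrative example (not part of the original file): the two ways
 * described above would be used as
 *
 *	ftrace_dump_on_oops				(kernel command line)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(at run time)
 *
 * and "ftrace_dump_on_oops=orig_cpu" (or writing 2) limits the dump to
 * the CPU that triggered the oops, matching set_ftrace_dump_on_oops()
 * below.
 */
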
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

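/*
 * Illustrative note (not in the original source): the "+ 500" above makes
 * the integer division round to the nearest microsecond instead of
 * truncating, e.g. ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */
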
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.trace_buffer.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

	return ts;
}

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

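/*
 * Illustrative note (not in the original source): the default above works
 * out to 16384 entries * 88 bytes = 1441792 bytes, i.e. a little under
 * 1.4 MiB, before any boot or run time resizing.
 */
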
static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

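/*
 * Illustrative sketch (not part of the original file): a reader of one
 * cpu buffer brackets its access like this, which excludes any concurrent
 * trace_access_lock(RING_BUFFER_ALL_CPUS) caller for the duration:
 *
 *	trace_access_lock(cpu);
 *	// ... read or consume events of the @cpu ring buffer ...
 *	trace_access_unlock(cpu);
 */
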
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_on(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

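/*
 * Illustrative sketch (not part of the original file): assuming the
 * snapshot buffer was allocated up front (tracing_snapshot_alloc() or
 * "echo 1 > /sys/kernel/debug/tracing/snapshot"), kernel code can freeze
 * the interesting moment while tracing continues; the condition below is
 * a hypothetical placeholder:
 *
 *	if (unlikely(latency > threshold))
 *		tracing_snapshot();
 *
 * The swapped-out events can then be read back from the "snapshot" file.
 */
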
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	if (WARN_ON(ret < 0))
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_off(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.trace_buffer.buffer)
		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

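/*
 * Illustrative sketch (not part of the original file): tracing_off() and
 * tracing_on() can bracket a region of interest so the ring buffer keeps
 * the events leading up to it, without tearing the tracer down:
 *
 *	tracing_off();		// freeze the buffer contents
 *	inspect_state();	// hypothetical debugging helper
 *	tracing_on();		// resume recording
 */
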
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

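/*
 * Illustrative example (not part of the original file): since the parser
 * above uses memparse(), the usual size suffixes work on the kernel
 * command line, e.g.:
 *
 *	trace_buf_size=65536
 *	trace_buf_size=1M
 */
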
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

int trace_clock_id;

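/*
 * Illustrative example (not part of the original file): the clock used
 * for ring buffer timestamps can be picked by name at run time, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is the fastest but is not guaranteed to be monotonic across
 * CPUs; "global" is coherent across CPUs at some extra cost; "counter"
 * is a simple atomic count rather than a time value (in_ns == 0).
 */
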
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

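/*
 * Illustrative sketch (not part of the original file) of the intended
 * parser life cycle, along the lines of the filter write handlers (the
 * variable names here are placeholders):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		;	// act on the NUL-terminated word in parser.buffer
 *	trace_parser_put(&parser);
 *	return read;
 */
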
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr
 * so it needs to be defined outside of
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

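/*
 * Illustrative sketch (not part of the original file): a minimal tracer
 * plugin fills in a struct tracer and registers it once at boot; the
 * names below are hypothetical:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */
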
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = ftrace_now(buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
	mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

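/*
 * Worked example of the maps above (pid and slot values invented for
 * illustration): if pid 1234 ("bash") is saved into slot 7, then
 *
 *	map_pid_to_cmdline[1234] == 7
 *	map_cmdline_to_pid[7]    == 1234
 *	saved_cmdlines[7]        == "bash"
 *
 * When slot 7 is later recycled for a different pid, the old entry
 * map_pid_to_cmdline[1234] is reset to NO_CMDLINE_MAP first, so the
 * stale pid can never resolve to the new task's comm.
 */
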
void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

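/*
 * Sketch of how the fields filled in above may be decoded by a
 * consumer (illustration only, not code from this file; the real
 * consumers are the tracer's output routines):
 *
 *	unsigned char pc  = entry->preempt_count;
 *	bool hardirq      = entry->flags & TRACE_FLAG_HARDIRQ;
 *	bool softirq      = entry->flags & TRACE_FLAG_SOFTIRQ;
 *	bool need_resched = entry->flags & TRACE_FLAG_NEED_RESCHED;
 */
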
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

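/*
 * The usual reserve/fill/commit pattern built on the helper above
 * (illustrative sketch; TRACE_FN and struct ftrace_entry are one
 * concrete example, borrowed from trace_function() below):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;		(no room: the event is silently dropped)
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;		(fill in the type-specific fields)
 *	__buffer_unlock_commit(buffer, event);
 */
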
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

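/*
 * Discard is the counterpart to commit: a caller that reserved an
 * event and then decides not to keep it can throw the event away
 * before readers ever see it. Sketch (illustration only, not code
 * from this file):
 *
 *	event = trace_current_buffer_lock_reserve(&buffer, type, len,
 *						  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	...fill entry, then decide the event is not wanted...
 *	trace_current_buffer_discard_commit(buffer, event);
 */
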
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, which seems to get us to the caller
	 * of this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}

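/*
 * Example (illustrative, not code from this file): code debugging an
 * unexpected path can record its caller's backtrace into the trace
 * buffer with
 *
 *	trace_dump_stack(0);
 *
 * A non-zero skip drops that many extra frames from the top of the
 * recorded trace, which helps when the call sits inside wrapper
 * functions.
 */
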
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * Prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

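/*
 * Example of the selection above (illustrative): if a trace_printk()
 * in process context is interrupted by a hard irq whose handler also
 * calls trace_printk(), the first call formats into
 * trace_percpu_buffer while the nested call formats into
 * trace_percpu_irq_buffer, so neither scribbles over the other. The
 * in_nmi()/in_irq()/in_softirq() checks are ordered from the most to
 * the least restrictive context for exactly this reason.
 */
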
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If global_trace.trace_buffer.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: the address of the caller
 * @fmt: the printk-style format string
 * @args: the arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

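/*
 * Rough usage sketch (illustrative, not code from this file):
 * trace_vbprintk() is the backend that a call like
 *
 *	trace_printk("read %d bytes from %s\n", len, name);
 *
 * eventually reaches for constant formats with arguments. Only the
 * format pointer and the binary-encoded arguments (via vbin_printf())
 * go into the ring buffer; the string is rendered later, when the
 * buffer is read, which keeps the tracing fast path cheap.
 */
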
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all the cpus; peek at the requested cpu directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

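/*
 * Worked example (timestamps invented for illustration): if the next
 * unread events per cpu are
 *
 *	cpu0: ts == 1005
 *	cpu1: ts == 1001
 *	cpu2: (empty)
 *
 * the loop above selects the cpu1 entry (next_cpu == 1,
 * next_ts == 1001); the following call then compares cpu0's 1005
 * against whatever cpu1 peeks next. This is how the per-cpu buffers
 * are merged into a single time-ordered stream.
 */
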
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid taking a global lock
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * Copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002465static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002466{
Steven Rostedt214023c2008-05-12 21:20:46 +02002467 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002468 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002469 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002470 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002472 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002473
Steven Rostedta3097202008-11-07 22:36:02 -05002474 test_cpu_buff_start(iter);
2475
Steven Rostedtf633cef2008-12-23 23:24:13 -05002476 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002477
2478 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002479 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2480 if (!trace_print_lat_context(iter))
2481 goto partial;
2482 } else {
2483 if (!trace_print_context(iter))
2484 goto partial;
2485 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002486 }
2487
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002488 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002489 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002490
2491 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2492 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002493
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002494 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002495partial:
2496 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002497}
2498
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002499static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002500{
2501 struct trace_seq *s = &iter->seq;
2502 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002503 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002504
2505 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002506
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002507 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002508 if (!trace_seq_printf(s, "%d %d %llu ",
2509 entry->pid, iter->cpu, iter->ts))
2510 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002511 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002512
Steven Rostedtf633cef2008-12-23 23:24:13 -05002513 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002514 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002515 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002516
2517 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2518 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002519
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002520 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002521partial:
2522 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002523}
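/*
 * Illustrative line produced by the raw format above: pid, cpu and
 * timestamp followed by the event's own raw output, e.g.
 *
 *   1234 2 74489562573 ...
 *
 * (the trailing fields depend on the event type)
 */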
2524
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002525static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002526{
2527 struct trace_seq *s = &iter->seq;
2528 unsigned char newline = '\n';
2529 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002530 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002531
2532 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002533
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002534 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2535 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2536 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2537 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2538 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002539
Steven Rostedtf633cef2008-12-23 23:24:13 -05002540 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002541 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002542 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002543 if (ret != TRACE_TYPE_HANDLED)
2544 return ret;
2545 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002546
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002547 SEQ_PUT_FIELD_RET(s, newline);
2548
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002549 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002550}
2551
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002552static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002553{
2554 struct trace_seq *s = &iter->seq;
2555 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002556 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002557
2558 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002559
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002560 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2561 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b522009-02-07 19:38:43 -05002562 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002563 SEQ_PUT_FIELD_RET(s, iter->ts);
2564 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002565
Steven Rostedtf633cef2008-12-23 23:24:13 -05002566 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002567 return event ? event->funcs->binary(iter, 0, event) :
2568 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002569}
2570
Jiri Olsa62b915f2010-04-02 19:01:22 +02002571int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002573 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574 int cpu;
2575
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002576 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002577 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002578 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002579 buf_iter = trace_buffer_iter(iter, cpu);
2580 if (buf_iter) {
2581 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002582 return 0;
2583 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002584 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002585 return 0;
2586 }
2587 return 1;
2588 }
2589
Steven Rostedtab464282008-05-12 21:21:00 +02002590 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002591 buf_iter = trace_buffer_iter(iter, cpu);
2592 if (buf_iter) {
2593 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002594 return 0;
2595 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002596 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002597 return 0;
2598 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002599 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002600
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002601 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002602}
2603
Lai Jiangshan4f535962009-05-18 19:35:34 +08002604/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002605enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002606{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002607 enum print_line_t ret;
2608
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002609 if (iter->lost_events &&
2610 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2611 iter->cpu, iter->lost_events))
2612 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002613
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002614 if (iter->trace && iter->trace->print_line) {
2615 ret = iter->trace->print_line(iter);
2616 if (ret != TRACE_TYPE_UNHANDLED)
2617 return ret;
2618 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002619
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002620 if (iter->ent->type == TRACE_BPUTS &&
2621 trace_flags & TRACE_ITER_PRINTK &&
2622 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2623 return trace_print_bputs_msg_only(iter);
2624
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002625 if (iter->ent->type == TRACE_BPRINT &&
2626 trace_flags & TRACE_ITER_PRINTK &&
2627 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002628 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002629
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002630 if (iter->ent->type == TRACE_PRINT &&
2631 trace_flags & TRACE_ITER_PRINTK &&
2632 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002633 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002634
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002635 if (trace_flags & TRACE_ITER_BIN)
2636 return print_bin_fmt(iter);
2637
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002638 if (trace_flags & TRACE_ITER_HEX)
2639 return print_hex_fmt(iter);
2640
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002641 if (trace_flags & TRACE_ITER_RAW)
2642 return print_raw_fmt(iter);
2643
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002644 return print_trace_fmt(iter);
2645}
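/*
 * Dispatch order used above: the current tracer's print_line() gets first
 * chance, trace_printk()-style entries may be reduced to their message
 * only, and otherwise the bin, hex, raw or default formatter is selected
 * from the trace_options flags.
 */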
2646
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002647void trace_latency_header(struct seq_file *m)
2648{
2649 struct trace_iterator *iter = m->private;
2650
2651 /* print nothing if the buffers are empty */
2652 if (trace_empty(iter))
2653 return;
2654
2655 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2656 print_trace_header(m, iter);
2657
2658 if (!(trace_flags & TRACE_ITER_VERBOSE))
2659 print_lat_help_header(m);
2660}
2661
Jiri Olsa62b915f2010-04-02 19:01:22 +02002662void trace_default_header(struct seq_file *m)
2663{
2664 struct trace_iterator *iter = m->private;
2665
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002666 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2667 return;
2668
Jiri Olsa62b915f2010-04-02 19:01:22 +02002669 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2670 /* print nothing if the buffers are empty */
2671 if (trace_empty(iter))
2672 return;
2673 print_trace_header(m, iter);
2674 if (!(trace_flags & TRACE_ITER_VERBOSE))
2675 print_lat_help_header(m);
2676 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002677 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2678 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002679 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002680 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002681 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002682 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002683 }
2684}
2685
Steven Rostedte0a413f2011-09-29 21:26:16 -04002686static void test_ftrace_alive(struct seq_file *m)
2687{
2688 if (!ftrace_is_dead())
2689 return;
2690 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2691 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2692}
2693
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002694#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002695static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002696{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002697 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2698 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2699 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
2700 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
 2701 seq_printf(m, "# (Doesn't have to be '2', works with any number that\n");

2702 seq_printf(m, "# is not a '0' or '1')\n");
2703}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002704
2705static void show_snapshot_percpu_help(struct seq_file *m)
2706{
2707 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2708#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2709 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2710 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2711#else
2712 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2713 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2714#endif
2715 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
 2716 seq_printf(m, "# (Doesn't have to be '2', works with any number that\n");
2717 seq_printf(m, "# is not a '0' or '1')\n");
2718}
2719
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002720static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2721{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002722 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002723 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2724 else
2725 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2726
2727 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002728 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2729 show_snapshot_main_help(m);
2730 else
2731 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002732}
2733#else
2734/* Should never be called */
2735static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2736#endif
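/*
 * Illustrative user-space usage of the snapshot file described above
 * (assuming debugfs is mounted and tracing lives in the usual place):
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot   # allocate and take a snapshot
 *   cat /sys/kernel/debug/tracing/snapshot        # read the snapshot buffer
 *   echo 0 > /sys/kernel/debug/tracing/snapshot   # clear and free it
 */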
2737
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002738static int s_show(struct seq_file *m, void *v)
2739{
2740 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002741 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002742
2743 if (iter->ent == NULL) {
2744 if (iter->tr) {
2745 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2746 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002747 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002748 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002749 if (iter->snapshot && trace_empty(iter))
2750 print_snapshot_help(m, iter);
2751 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002752 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002753 else
2754 trace_default_header(m);
2755
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002756 } else if (iter->leftover) {
2757 /*
2758 * If we filled the seq_file buffer earlier, we
2759 * want to just show it now.
2760 */
2761 ret = trace_print_seq(m, &iter->seq);
2762
2763 /* ret should this time be zero, but you never know */
2764 iter->leftover = ret;
2765
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002766 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002767 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002768 ret = trace_print_seq(m, &iter->seq);
2769 /*
2770 * If we overflow the seq_file buffer, then it will
2771 * ask us for this data again at start up.
2772 * Use that instead.
2773 * ret is 0 if seq_file write succeeded.
2774 * -1 otherwise.
2775 */
2776 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002777 }
2778
2779 return 0;
2780}
2781
James Morris88e9d342009-09-22 16:43:43 -07002782static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002783 .start = s_start,
2784 .next = s_next,
2785 .stop = s_stop,
2786 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002787};
2788
Ingo Molnare309b412008-05-12 21:20:51 +02002789static struct trace_iterator *
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002790__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002791{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002792 struct trace_cpu *tc = inode->i_private;
2793 struct trace_array *tr = tc->tr;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002794 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002795 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002796
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002797 if (tracing_disabled)
2798 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002799
Jiri Olsa50e18b92012-04-25 10:23:39 +02002800 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002801 if (!iter)
2802 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002803
Steven Rostedt6d158a82012-06-27 20:46:14 -04002804 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2805 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002806 if (!iter->buffer_iter)
2807 goto release;
2808
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002809 /*
2810 * We make a copy of the current tracer to avoid concurrent
2811 * changes on it while we are reading.
2812 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002813 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002814 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002815 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002816 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002817
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002818 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002819
Li Zefan79f55992009-06-15 14:58:26 +08002820 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002821 goto fail;
2822
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002823 iter->tr = tr;
2824
2825#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002826 /* Currently only the top directory has a snapshot */
2827 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002828 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002829 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002830#endif
2831 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002832 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002833 iter->pos = -1;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002834 mutex_init(&iter->mutex);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002835 iter->cpu_file = tc->cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002836
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002837 /* Notify the tracer early; before we stop tracing. */
2838 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002839 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002840
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002841 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002842 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002843 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2844
David Sharp8be07092012-11-13 12:18:22 -08002845 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2846 if (trace_clocks[trace_clock_id].in_ns)
2847 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2848
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002849 /* stop the trace while dumping if we are not opening "snapshot" */
2850 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002851 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002852
Steven Rostedtae3b5092013-01-23 15:22:59 -05002853 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002854 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002855 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002856 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002857 }
2858 ring_buffer_read_prepare_sync();
2859 for_each_tracing_cpu(cpu) {
2860 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002861 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002862 }
2863 } else {
2864 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002865 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002866 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002867 ring_buffer_read_prepare_sync();
2868 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002869 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002870 }
2871
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05002872 tr->ref++;
2873
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002874 mutex_unlock(&trace_types_lock);
2875
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002876 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002877
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002878 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002879 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002880 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04002881 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002882release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02002883 seq_release_private(inode, file);
2884 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002885}
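/*
 * Note: unless the "snapshot" file is being opened, __tracing_open()
 * stops tracing on the trace_array while the iterator exists; tracing
 * is restarted from tracing_release() when the file is closed.
 */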
2886
2887int tracing_open_generic(struct inode *inode, struct file *filp)
2888{
Steven Rostedt60a11772008-05-12 21:20:44 +02002889 if (tracing_disabled)
2890 return -ENODEV;
2891
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002892 filp->private_data = inode->i_private;
2893 return 0;
2894}
2895
Hannes Eder4fd27352009-02-10 19:44:12 +01002896static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002897{
matt mooney907f2782010-09-27 19:04:53 -07002898 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002899 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002900 struct trace_array *tr;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002901 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002902
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002903 if (!(file->f_mode & FMODE_READ))
2904 return 0;
2905
2906 iter = m->private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002907 tr = iter->tr;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002908
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002909 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05002910
2911 WARN_ON(!tr->ref);
2912 tr->ref--;
2913
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002914 for_each_tracing_cpu(cpu) {
2915 if (iter->buffer_iter[cpu])
2916 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2917 }
2918
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002919 if (iter->trace && iter->trace->close)
2920 iter->trace->close(iter);
2921
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002922 if (!iter->snapshot)
2923 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002924 tracing_start_tr(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002925 mutex_unlock(&trace_types_lock);
2926
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002927 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002928 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002929 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04002930 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02002931 seq_release_private(inode, file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002932 return 0;
2933}
2934
2935static int tracing_open(struct inode *inode, struct file *file)
2936{
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002937 struct trace_iterator *iter;
2938 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002939
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002940 /* If this file was open for write, then erase contents */
2941 if ((file->f_mode & FMODE_WRITE) &&
Steven Rostedt8650ae32009-07-22 23:29:30 -04002942 (file->f_flags & O_TRUNC)) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002943 struct trace_cpu *tc = inode->i_private;
2944 struct trace_array *tr = tc->tr;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002945
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002946 if (tc->cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002947 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002948 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002949 tracing_reset(&tr->trace_buffer, tc->cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002950 }
2951
2952 if (file->f_mode & FMODE_READ) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002953 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002954 if (IS_ERR(iter))
2955 ret = PTR_ERR(iter);
2956 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2957 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2958 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002959 return ret;
2960}
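/*
 * Illustrative usage of the "trace" file handled here (path assumes the
 * usual debugfs mount point):
 *
 *   cat /sys/kernel/debug/tracing/trace    # read the buffer contents
 *   echo > /sys/kernel/debug/tracing/trace # O_TRUNC open resets the buffer
 */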
2961
Ingo Molnare309b412008-05-12 21:20:51 +02002962static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002963t_next(struct seq_file *m, void *v, loff_t *pos)
2964{
Li Zefanf129e962009-06-24 09:53:44 +08002965 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002966
2967 (*pos)++;
2968
2969 if (t)
2970 t = t->next;
2971
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002972 return t;
2973}
2974
2975static void *t_start(struct seq_file *m, loff_t *pos)
2976{
Li Zefanf129e962009-06-24 09:53:44 +08002977 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002978 loff_t l = 0;
2979
2980 mutex_lock(&trace_types_lock);
Li Zefanf129e962009-06-24 09:53:44 +08002981 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002982 ;
2983
2984 return t;
2985}
2986
2987static void t_stop(struct seq_file *m, void *p)
2988{
2989 mutex_unlock(&trace_types_lock);
2990}
2991
2992static int t_show(struct seq_file *m, void *v)
2993{
2994 struct tracer *t = v;
2995
2996 if (!t)
2997 return 0;
2998
2999 seq_printf(m, "%s", t->name);
3000 if (t->next)
3001 seq_putc(m, ' ');
3002 else
3003 seq_putc(m, '\n');
3004
3005 return 0;
3006}
3007
James Morris88e9d342009-09-22 16:43:43 -07003008static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003009 .start = t_start,
3010 .next = t_next,
3011 .stop = t_stop,
3012 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003013};
3014
3015static int show_traces_open(struct inode *inode, struct file *file)
3016{
Steven Rostedt60a11772008-05-12 21:20:44 +02003017 if (tracing_disabled)
3018 return -ENODEV;
3019
Li Zefanf129e962009-06-24 09:53:44 +08003020 return seq_open(file, &show_traces_seq_ops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003021}
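/*
 * Illustrative output of the "available_tracers" file backed by the
 * seq_operations above (the list depends on the kernel configuration):
 *
 *   cat /sys/kernel/debug/tracing/available_tracers
 *   blk function_graph function nop
 */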
3022
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003023static ssize_t
3024tracing_write_stub(struct file *filp, const char __user *ubuf,
3025 size_t count, loff_t *ppos)
3026{
3027 return count;
3028}
3029
Slava Pestov364829b2010-11-24 15:13:16 -08003030static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
3031{
3032 if (file->f_mode & FMODE_READ)
3033 return seq_lseek(file, offset, origin);
3034 else
3035 return 0;
3036}
3037
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003038static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003039 .open = tracing_open,
3040 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003041 .write = tracing_write_stub,
Slava Pestov364829b2010-11-24 15:13:16 -08003042 .llseek = tracing_seek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003043 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003044};
3045
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003046static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003047 .open = show_traces_open,
3048 .read = seq_read,
3049 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003050 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003051};
3052
Ingo Molnar36dfe922008-05-12 21:20:52 +02003053/*
3054 * Only trace on a CPU if the bitmask is set:
3055 */
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303056static cpumask_var_t tracing_cpumask;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003057
3058/*
3059 * The tracer itself will not take this lock, but still we want
3060 * to provide a consistent cpumask to user-space:
3061 */
3062static DEFINE_MUTEX(tracing_cpumask_update_lock);
3063
3064/*
3065 * Temporary storage for the character representation of the
3066 * CPU bitmask (and one more byte for the newline):
3067 */
3068static char mask_str[NR_CPUS + 1];
3069
Ingo Molnarc7078de2008-05-12 21:20:52 +02003070static ssize_t
3071tracing_cpumask_read(struct file *filp, char __user *ubuf,
3072 size_t count, loff_t *ppos)
3073{
Ingo Molnar36dfe922008-05-12 21:20:52 +02003074 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003075
3076 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003077
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303078 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003079 if (count - len < 2) {
3080 count = -EINVAL;
3081 goto out_err;
3082 }
3083 len += sprintf(mask_str + len, "\n");
3084 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3085
3086out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003087 mutex_unlock(&tracing_cpumask_update_lock);
3088
3089 return count;
3090}
3091
3092static ssize_t
3093tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3094 size_t count, loff_t *ppos)
3095{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003096 struct trace_array *tr = filp->private_data;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303097 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003098 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303099
3100 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3101 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003102
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303103 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003104 if (err)
3105 goto err_unlock;
3106
Li Zefan215368e2009-06-15 10:56:42 +08003107 mutex_lock(&tracing_cpumask_update_lock);
3108
Steven Rostedta5e25882008-12-02 15:34:05 -05003109 local_irq_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003110 arch_spin_lock(&ftrace_max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003111 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003112 /*
3113 * Increase/decrease the disabled counter if we are
3114 * about to flip a bit in the cpumask:
3115 */
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303116 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
3117 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003118 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3119 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003120 }
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303121 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
3122 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003123 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3124 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003125 }
3126 }
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003127 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003128 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003129
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303130 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003131
Ingo Molnarc7078de2008-05-12 21:20:52 +02003132 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303133 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003134
Ingo Molnarc7078de2008-05-12 21:20:52 +02003135 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003136
3137err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003138 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003139
3140 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003141}
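/*
 * Illustrative usage: tracing_cpumask takes a hex CPU mask, e.g.
 *
 *   echo 3 > /sys/kernel/debug/tracing/tracing_cpumask   # trace CPUs 0-1 only
 *   cat /sys/kernel/debug/tracing/tracing_cpumask        # read back the mask
 *
 * CPUs cleared from the mask have their per-cpu recording disabled via
 * ring_buffer_record_disable_cpu() as shown above.
 */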
3142
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003143static const struct file_operations tracing_cpumask_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003144 .open = tracing_open_generic,
3145 .read = tracing_cpumask_read,
3146 .write = tracing_cpumask_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003147 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003148};
3149
Li Zefanfdb372e2009-12-08 11:15:59 +08003150static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003151{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003152 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003153 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003154 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003155 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003156
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003157 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003158 tracer_flags = tr->current_trace->flags->val;
3159 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003160
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003161 for (i = 0; trace_options[i]; i++) {
3162 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003163 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003164 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003165 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003166 }
3167
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003168 for (i = 0; trace_opts[i].name; i++) {
3169 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003170 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003171 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003172 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003173 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003174 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003175
Li Zefanfdb372e2009-12-08 11:15:59 +08003176 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003177}
3178
Li Zefan8d18eaa2009-12-08 11:17:06 +08003179static int __set_tracer_option(struct tracer *trace,
3180 struct tracer_flags *tracer_flags,
3181 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003182{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003183 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003184
Li Zefan8d18eaa2009-12-08 11:17:06 +08003185 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003186 if (ret)
3187 return ret;
3188
3189 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003190 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003191 else
Zhaolei77708412009-08-07 18:53:21 +08003192 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003193 return 0;
3194}
3195
Li Zefan8d18eaa2009-12-08 11:17:06 +08003196/* Try to assign a tracer specific option */
3197static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3198{
3199 struct tracer_flags *tracer_flags = trace->flags;
3200 struct tracer_opt *opts = NULL;
3201 int i;
3202
3203 for (i = 0; tracer_flags->opts[i].name; i++) {
3204 opts = &tracer_flags->opts[i];
3205
3206 if (strcmp(cmp, opts->name) == 0)
3207 return __set_tracer_option(trace, trace->flags,
3208 opts, neg);
3209 }
3210
3211 return -EINVAL;
3212}
3213
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003214/* Some tracers require overwrite to stay enabled */
3215int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3216{
3217 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3218 return -1;
3219
3220 return 0;
3221}
3222
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003223int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003224{
3225 /* do nothing if flag is already set */
3226 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003227 return 0;
3228
3229 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003230 if (tr->current_trace->flag_changed)
3231 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003232 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003233
3234 if (enabled)
3235 trace_flags |= mask;
3236 else
3237 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003238
3239 if (mask == TRACE_ITER_RECORD_CMD)
3240 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003241
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003242 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003243 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003244#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003245 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003246#endif
3247 }
Steven Rostedt81698832012-10-11 10:15:05 -04003248
3249 if (mask == TRACE_ITER_PRINTK)
3250 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003251
3252 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003253}
3254
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003255static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003256{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003257 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003258 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003259 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003260 int i;
3261
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003262 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003263
Li Zefan8d18eaa2009-12-08 11:17:06 +08003264 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003265 neg = 1;
3266 cmp += 2;
3267 }
3268
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003269 mutex_lock(&trace_types_lock);
3270
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003271 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003272 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003273 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003274 break;
3275 }
3276 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003277
3278 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003279 if (!trace_options[i])
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003280 ret = set_tracer_option(tr->current_trace, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003281
3282 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003283
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003284 return ret;
3285}
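/*
 * Illustrative usage of the option parser above: write an option name to
 * set it, or the name prefixed with "no" to clear it, e.g.
 *
 *   echo sym-offset > trace_options
 *   echo noprint-parent > trace_options
 *
 * Names that match no core flag fall through to the current tracer's own
 * options via set_tracer_option().
 */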
3286
3287static ssize_t
3288tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3289 size_t cnt, loff_t *ppos)
3290{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003291 struct seq_file *m = filp->private_data;
3292 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003293 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003294 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003295
3296 if (cnt >= sizeof(buf))
3297 return -EINVAL;
3298
3299 if (copy_from_user(&buf, ubuf, cnt))
3300 return -EFAULT;
3301
Steven Rostedta8dd2172013-01-09 20:54:17 -05003302 buf[cnt] = 0;
3303
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003304 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003305 if (ret < 0)
3306 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003307
Jiri Olsacf8517c2009-10-23 19:36:16 -04003308 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003309
3310 return cnt;
3311}
3312
Li Zefanfdb372e2009-12-08 11:15:59 +08003313static int tracing_trace_options_open(struct inode *inode, struct file *file)
3314{
3315 if (tracing_disabled)
3316 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003317
3318 return single_open(file, tracing_trace_options_show, inode->i_private);
Li Zefanfdb372e2009-12-08 11:15:59 +08003319}
3320
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003321static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003322 .open = tracing_trace_options_open,
3323 .read = seq_read,
3324 .llseek = seq_lseek,
3325 .release = single_release,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003326 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003327};
3328
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003329static const char readme_msg[] =
3330 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003331 "# echo 0 > tracing_on : quick way to disable tracing\n"
3332 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3333 " Important files:\n"
3334 " trace\t\t\t- The static contents of the buffer\n"
3335 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3336 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3337 " current_tracer\t- function and latency tracers\n"
3338 " available_tracers\t- list of configured tracers for current_tracer\n"
3339 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3340 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
 3341 " trace_clock\t\t- change the clock used to order events\n"
3342 " local: Per cpu clock but may not be synced across CPUs\n"
3343 " global: Synced across CPUs but slows tracing down.\n"
3344 " counter: Not a clock, but just an increment\n"
3345 " uptime: Jiffy counter from time of boot\n"
3346 " perf: Same clock that perf events use\n"
3347#ifdef CONFIG_X86_64
3348 " x86-tsc: TSC cycle counter\n"
3349#endif
 3350 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
3351 " tracing_cpumask\t- Limit which CPUs to trace\n"
3352 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3353 "\t\t\t Remove sub-buffer with rmdir\n"
3354 " trace_options\t\t- Set format or modify how tracing happens\n"
 3355 "\t\t\t Disable an option by prefixing the option name with 'no'\n"
3356#ifdef CONFIG_DYNAMIC_FTRACE
3357 "\n available_filter_functions - list of functions that can be filtered on\n"
3358 " set_ftrace_filter\t- echo function name in here to only trace these functions\n"
3359 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3360 " modules: Can select a group via module\n"
3361 " Format: :mod:<module-name>\n"
3362 " example: echo :mod:ext3 > set_ftrace_filter\n"
3363 " triggers: a command to perform when function is hit\n"
3364 " Format: <function>:<trigger>[:count]\n"
3365 " trigger: traceon, traceoff\n"
3366 " enable_event:<system>:<event>\n"
3367 " disable_event:<system>:<event>\n"
3368#ifdef CONFIG_STACKTRACE
3369 " stacktrace\n"
3370#endif
3371#ifdef CONFIG_TRACER_SNAPSHOT
3372 " snapshot\n"
3373#endif
3374 " example: echo do_fault:traceoff > set_ftrace_filter\n"
3375 " echo do_trap:traceoff:3 > set_ftrace_filter\n"
3376 " The first one will disable tracing every time do_fault is hit\n"
3377 " The second will disable tracing at most 3 times when do_trap is hit\n"
 3378 " The first time do_trap is hit and it disables tracing, the counter\n"
3379 " will decrement to 2. If tracing is already disabled, the counter\n"
3380 " will not decrement. It only decrements when the trigger did work\n"
3381 " To remove trigger without count:\n"
 3382 " echo '!<function>:<trigger>' > set_ftrace_filter\n"
3383 " To remove trigger with a count:\n"
 3384 " echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3385 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3386 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3387 " modules: Can select a group via module command :mod:\n"
3388 " Does not accept triggers\n"
3389#endif /* CONFIG_DYNAMIC_FTRACE */
3390#ifdef CONFIG_FUNCTION_TRACER
3391 " set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
3392#endif
3393#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3394 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3395 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3396#endif
3397#ifdef CONFIG_TRACER_SNAPSHOT
3398 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
3399 "\t\t\t Read the contents for more information\n"
3400#endif
3401#ifdef CONFIG_STACKTRACE
3402 " stack_trace\t\t- Shows the max stack trace when active\n"
3403 " stack_max_size\t- Shows current max stack size that was traced\n"
3404 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
3405#ifdef CONFIG_DYNAMIC_FTRACE
3406 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
3407#endif
3408#endif /* CONFIG_STACKTRACE */
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003409;
3410
3411static ssize_t
3412tracing_readme_read(struct file *filp, char __user *ubuf,
3413 size_t cnt, loff_t *ppos)
3414{
3415 return simple_read_from_buffer(ubuf, cnt, ppos,
3416 readme_msg, strlen(readme_msg));
3417}
3418
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003419static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003420 .open = tracing_open_generic,
3421 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003422 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003423};
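/*
 * The readme_msg text above is what user space reads from the README file
 * of the tracing directory, e.g.:
 *
 *   cat /sys/kernel/debug/tracing/README
 */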
3424
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003425static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003426tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3427 size_t cnt, loff_t *ppos)
3428{
3429 char *buf_comm;
3430 char *file_buf;
3431 char *buf;
3432 int len = 0;
3433 int pid;
3434 int i;
3435
3436 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3437 if (!file_buf)
3438 return -ENOMEM;
3439
3440 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3441 if (!buf_comm) {
3442 kfree(file_buf);
3443 return -ENOMEM;
3444 }
3445
3446 buf = file_buf;
3447
3448 for (i = 0; i < SAVED_CMDLINES; i++) {
3449 int r;
3450
3451 pid = map_cmdline_to_pid[i];
3452 if (pid == -1 || pid == NO_CMDLINE_MAP)
3453 continue;
3454
3455 trace_find_cmdline(pid, buf_comm);
3456 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3457 buf += r;
3458 len += r;
3459 }
3460
3461 len = simple_read_from_buffer(ubuf, cnt, ppos,
3462 file_buf, len);
3463
3464 kfree(file_buf);
3465 kfree(buf_comm);
3466
3467 return len;
3468}
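/*
 * Illustrative output of the "saved_cmdlines" file generated above, one
 * "<pid> <comm>" pair per line (contents depend on recent tracing):
 *
 *   cat /sys/kernel/debug/tracing/saved_cmdlines
 *   1 systemd
 *   527 sshd
 */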
3469
3470static const struct file_operations tracing_saved_cmdlines_fops = {
3471 .open = tracing_open_generic,
3472 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003473 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003474};
3475
3476static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003477tracing_set_trace_read(struct file *filp, char __user *ubuf,
3478 size_t cnt, loff_t *ppos)
3479{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003480 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003481 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003482 int r;
3483
3484 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003485 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003486 mutex_unlock(&trace_types_lock);
3487
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003488 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003489}
3490
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003491int tracer_init(struct tracer *t, struct trace_array *tr)
3492{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003493 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003494 return t->init(tr);
3495}
3496
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003497static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003498{
3499 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003500
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003501 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003502 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003503}
3504
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003505#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003506/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003507static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3508 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003509{
3510 int cpu, ret = 0;
3511
3512 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3513 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003514 ret = ring_buffer_resize(trace_buf->buffer,
3515 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003516 if (ret < 0)
3517 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003518 per_cpu_ptr(trace_buf->data, cpu)->entries =
3519 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003520 }
3521 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003522 ret = ring_buffer_resize(trace_buf->buffer,
3523 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003524 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003525 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3526 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003527 }
3528
3529 return ret;
3530}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003531#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003532
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003533static int __tracing_resize_ring_buffer(struct trace_array *tr,
3534 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003535{
3536 int ret;
3537
3538 /*
3539 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003540 * we use the size that was given, and we can forget about
3541 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003542 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003543 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003544
Steven Rostedtb382ede62012-10-10 21:44:34 -04003545 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003546 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003547 return 0;
3548
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003549 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003550 if (ret < 0)
3551 return ret;
3552
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003553#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003554 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3555 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003556 goto out;
3557
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003558 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003559 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003560 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3561 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003562 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003563 /*
3564 * AARGH! We are left with different
3565 * size max buffer!!!!
3566 * The max buffer is our "snapshot" buffer.
3567 * When a tracer needs a snapshot (one of the
3568 * latency tracers), it swaps the max buffer
3569 * with the saved snap shot. We succeeded to
3570 * update the size of the main buffer, but failed to
3571 * update the size of the max buffer. But when we tried
3572 * to reset the main buffer to the original size, we
3573 * failed there too. This is very unlikely to
3574 * happen, but if it does, warn and kill all
3575 * tracing.
3576 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003577 WARN_ON(1);
3578 tracing_disabled = 1;
3579 }
3580 return ret;
3581 }
3582
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003583 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003584 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003585 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003586 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003587
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003588 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003589#endif /* CONFIG_TRACER_MAX_TRACE */
3590
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003591 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003592 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003593 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003594 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003595
3596 return ret;
3597}
3598
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003599static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3600 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003601{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003602 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003603
3604 mutex_lock(&trace_types_lock);
3605
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003606 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3607 /* make sure, this cpu is enabled in the mask */
3608 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3609 ret = -EINVAL;
3610 goto out;
3611 }
3612 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003613
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003614 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003615 if (ret < 0)
3616 ret = -ENOMEM;
3617
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003618out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003619 mutex_unlock(&trace_types_lock);
3620
3621 return ret;
3622}
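/*
 * Illustrative usage: the buffer_size_kb files resize the ring buffer
 * through this path, for all CPUs or for a single CPU via per_cpu/cpuX/:
 *
 *   echo 2048 > /sys/kernel/debug/tracing/buffer_size_kb
 *   echo 1024 > /sys/kernel/debug/tracing/per_cpu/cpu1/buffer_size_kb
 */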
3623
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003624
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003625/**
3626 * tracing_update_buffers - used by tracing facility to expand ring buffers
3627 *
 3628 * To save memory on systems where tracing is configured in but never
 3629 * used, the ring buffers start at a minimum size. Once a user starts
 3630 * using the tracing facility, they need to grow to their default
 3631 * size.
3632 *
3633 * This function is to be called when a tracer is about to be used.
3634 */
3635int tracing_update_buffers(void)
3636{
3637 int ret = 0;
3638
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003639 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003640 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003641 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003642 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003643 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003644
3645 return ret;
3646}
3647
Steven Rostedt577b7852009-02-26 23:43:05 -05003648struct trace_option_dentry;
3649
3650static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003651create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003652
3653static void
3654destroy_trace_option_files(struct trace_option_dentry *topts);
3655
Steven Rostedtb2821ae2009-02-02 21:38:32 -05003656static int tracing_set_tracer(const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003657{
Steven Rostedt577b7852009-02-26 23:43:05 -05003658 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003659 struct trace_array *tr = &global_trace;
3660 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003661#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003662 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003663#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003664 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003665
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003666 mutex_lock(&trace_types_lock);
3667
Steven Rostedt73c51622009-03-11 13:42:01 -04003668 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003669 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003670 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003671 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003672 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003673 ret = 0;
3674 }
3675
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003676 for (t = trace_types; t; t = t->next) {
3677 if (strcmp(t->name, buf) == 0)
3678 break;
3679 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003680 if (!t) {
3681 ret = -EINVAL;
3682 goto out;
3683 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003684 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003685 goto out;
3686
Steven Rostedt9f029e82008-11-12 15:24:24 -05003687 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003688
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003689 tr->current_trace->enabled = false;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003690
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003691 if (tr->current_trace->reset)
3692 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003693
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003694 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003695 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003696
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003697#ifdef CONFIG_TRACER_MAX_TRACE
3698 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05003699
3700 if (had_max_tr && !t->use_max_tr) {
3701 /*
3702 * We need to make sure that the update_max_tr sees that
3703 * current_trace changed to nop_trace to keep it from
3704 * swapping the buffers after we resize it.
3705	 * update_max_tr() is called with interrupts disabled,
3706	 * so a synchronize_sched() is sufficient.
3707 */
3708 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003709 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003710 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003711#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003712 destroy_trace_option_files(topts);
3713
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003714 topts = create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003715
3716#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003717 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003718 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003719 if (ret < 0)
3720 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003721 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003722#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003723
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003724 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003725 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003726 if (ret)
3727 goto out;
3728 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003729
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003730 tr->current_trace = t;
3731 tr->current_trace->enabled = true;
Steven Rostedt9f029e82008-11-12 15:24:24 -05003732 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003733 out:
3734 mutex_unlock(&trace_types_lock);
3735
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003736 return ret;
3737}
3738
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003739static ssize_t
3740tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3741 size_t cnt, loff_t *ppos)
3742{
Li Zefanee6c2c12009-09-18 14:06:47 +08003743 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003744 int i;
3745 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003746 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003747
Steven Rostedt60063a62008-10-28 10:44:24 -04003748 ret = cnt;
3749
Li Zefanee6c2c12009-09-18 14:06:47 +08003750 if (cnt > MAX_TRACER_SIZE)
3751 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003752
3753 if (copy_from_user(&buf, ubuf, cnt))
3754 return -EFAULT;
3755
3756 buf[cnt] = 0;
3757
3758	/* strip trailing whitespace */
3759 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3760 buf[i] = 0;
3761
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003762 err = tracing_set_tracer(buf);
3763 if (err)
3764 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003765
Jiri Olsacf8517c2009-10-23 19:36:16 -04003766 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003767
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003768 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003769}
3770
3771static ssize_t
3772tracing_max_lat_read(struct file *filp, char __user *ubuf,
3773 size_t cnt, loff_t *ppos)
3774{
3775 unsigned long *ptr = filp->private_data;
3776 char buf[64];
3777 int r;
3778
Steven Rostedtcffae432008-05-12 21:21:00 +02003779 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003780 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02003781 if (r > sizeof(buf))
3782 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003783 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003784}
3785
3786static ssize_t
3787tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3788 size_t cnt, loff_t *ppos)
3789{
Hannes Eder5e398412009-02-10 19:44:34 +01003790 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01003791 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02003792 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003793
Peter Huewe22fe9b52011-06-07 21:58:27 +02003794 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3795 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02003796 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003797
3798 *ptr = val * 1000;
3799
3800 return cnt;
3801}
3802
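/*
 * Usage sketch (not part of the original file): the latency files
 * served by the two handlers above are exposed in microseconds while
 * the kernel stores nanoseconds, hence the nsecs_to_usecs() on read
 * and the "val * 1000" on write. Assuming the usual debugfs mount:
 *
 *	echo 100 > /sys/kernel/debug/tracing/tracing_max_latency
 *
 * stores 100000 (ns) internally and reads back as "100".
 */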
Steven Rostedtb3806b42008-05-12 21:20:46 +02003803static int tracing_open_pipe(struct inode *inode, struct file *filp)
3804{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003805 struct trace_cpu *tc = inode->i_private;
3806 struct trace_array *tr = tc->tr;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003807 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003808 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003809
3810 if (tracing_disabled)
3811 return -ENODEV;
3812
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003813 mutex_lock(&trace_types_lock);
3814
Steven Rostedtb3806b42008-05-12 21:20:46 +02003815 /* create a buffer to store the information to pass to userspace */
3816 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003817 if (!iter) {
3818 ret = -ENOMEM;
3819 goto out;
3820 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02003821
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003822 /*
3823 * We make a copy of the current tracer to avoid concurrent
3824 * changes on it while we are reading.
3825 */
3826 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3827 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003828 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003829 goto fail;
3830 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003831 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003832
3833 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3834 ret = -ENOMEM;
3835 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10303836 }
3837
Steven Rostedta3097202008-11-07 22:36:02 -05003838 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10303839 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05003840
Steven Rostedt112f38a72009-06-01 15:16:05 -04003841 if (trace_flags & TRACE_ITER_LATENCY_FMT)
3842 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3843
David Sharp8be07092012-11-13 12:18:22 -08003844 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3845 if (trace_clocks[trace_clock_id].in_ns)
3846 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3847
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003848 iter->cpu_file = tc->cpu;
3849 iter->tr = tc->tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003850 iter->trace_buffer = &tc->tr->trace_buffer;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003851 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003852 filp->private_data = iter;
3853
Steven Rostedt107bad82008-05-12 21:21:01 +02003854 if (iter->trace->pipe_open)
3855 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02003856
Arnd Bergmannb4447862010-07-07 23:40:11 +02003857 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003858out:
3859 mutex_unlock(&trace_types_lock);
3860 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003861
3862fail:
3863 kfree(iter->trace);
3864 kfree(iter);
3865 mutex_unlock(&trace_types_lock);
3866 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003867}
3868
3869static int tracing_release_pipe(struct inode *inode, struct file *file)
3870{
3871 struct trace_iterator *iter = file->private_data;
3872
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003873 mutex_lock(&trace_types_lock);
3874
Steven Rostedt29bf4a52009-12-09 12:37:43 -05003875 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05003876 iter->trace->pipe_close(iter);
3877
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003878 mutex_unlock(&trace_types_lock);
3879
Rusty Russell44623442009-01-01 10:12:23 +10303880 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003881 mutex_destroy(&iter->mutex);
3882 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003883 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003884
3885 return 0;
3886}
3887
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003888static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05003889trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003890{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05003891	/* Iterators are static; they should be filled or empty */
3892 if (trace_buffer_iter(iter, iter->cpu_file))
3893 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003894
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05003895 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003896 /*
3897 * Always select as readable when in blocking mode
3898 */
3899 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05003900 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003901 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05003902 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003903}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003904
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05003905static unsigned int
3906tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3907{
3908 struct trace_iterator *iter = filp->private_data;
3909
3910 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003911}
3912
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01003913/*
3914 * This is a make-shift waitqueue.
3915 * A tracer might use this callback in some rare cases:
3916 *
3917 * 1) the current tracer might hold the runqueue lock when it wakes up
3918 * a reader, hence a deadlock (sched, function, and function graph tracers)
3919 * 2) the function tracers trace all functions, and we don't want
3920 * the overhead of calling wake_up and friends
3921 * (and tracing them too)
3922 *
3923 * Anyway, this is a really primitive wakeup.
3924 */
3925void poll_wait_pipe(struct trace_iterator *iter)
3926{
3927 set_current_state(TASK_INTERRUPTIBLE);
3928 /* sleep for 100 msecs, and try again. */
3929 schedule_timeout(HZ / 10);
3930}
3931
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003932/* Must be called with trace_types_lock mutex held. */
3933static int tracing_wait_pipe(struct file *filp)
3934{
3935 struct trace_iterator *iter = filp->private_data;
3936
3937 while (trace_empty(iter)) {
3938
3939 if ((filp->f_flags & O_NONBLOCK)) {
3940 return -EAGAIN;
3941 }
3942
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003943 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003944
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01003945 iter->trace->wait_pipe(iter);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003946
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003947 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003948
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01003949 if (signal_pending(current))
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003950 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003951
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003952 /*
Liu Bo250bfd32013-01-14 10:54:11 +08003953		 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003954		 * If tracing is disabled but we have never read anything,
3955		 * we keep blocking. This allows a user to cat this file, and
3956		 * then enable tracing. But after we have read something,
3957		 * we give an EOF when tracing is again disabled.
3958 *
3959 * iter->pos will be 0 if we haven't read anything.
3960 */
Liu Bo250bfd32013-01-14 10:54:11 +08003961 if (!tracing_is_enabled() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003962 break;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003963 }
3964
3965 return 1;
3966}
3967
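/*
 * Example of the blocking semantics implemented above (assuming the
 * usual debugfs mount point; not part of the original file):
 *
 *	cat /sys/kernel/debug/tracing/trace_pipe
 *
 * blocks while the buffer is empty, even when tracing is disabled,
 * as long as nothing has been read yet; once something has been read
 * and tracing is disabled again, the reader sees an EOF.
 */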
Steven Rostedtb3806b42008-05-12 21:20:46 +02003968/*
3969 * Consumer reader.
3970 */
3971static ssize_t
3972tracing_read_pipe(struct file *filp, char __user *ubuf,
3973 size_t cnt, loff_t *ppos)
3974{
3975 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003976 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003977 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003978
3979 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003980 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3981 if (sret != -EBUSY)
3982 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003983
Steven Rostedtf9520752009-03-02 14:04:40 -05003984 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003985
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003986 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02003987 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003988 if (unlikely(iter->trace->name != tr->current_trace->name))
3989 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003990 mutex_unlock(&trace_types_lock);
3991
3992 /*
3993 * Avoid more than one consumer on a single file descriptor
3994 * This is just a matter of traces coherency, the ring buffer itself
3995 * is protected.
3996 */
3997 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02003998 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003999 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4000 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004001 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004002 }
4003
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004004waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004005 sret = tracing_wait_pipe(filp);
4006 if (sret <= 0)
4007 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004008
4009 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004010 if (trace_empty(iter)) {
4011 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004012 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004013 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004014
4015 if (cnt >= PAGE_SIZE)
4016 cnt = PAGE_SIZE - 1;
4017
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004018 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004019 memset(&iter->seq, 0,
4020 sizeof(struct trace_iterator) -
4021 offsetof(struct trace_iterator, seq));
Steven Rostedt4823ed72008-05-12 21:21:01 +02004022 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004023
Lai Jiangshan4f535962009-05-18 19:35:34 +08004024 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004025 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004026 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004027 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004028 int len = iter->seq.len;
4029
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004030 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004031 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004032 /* don't print partial lines */
4033 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004034 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004035 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004036 if (ret != TRACE_TYPE_NO_CONSUME)
4037 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004038
4039 if (iter->seq.len >= cnt)
4040 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004041
4042 /*
4043 * Setting the full flag means we reached the trace_seq buffer
4044		 * size and should have left via the partial-output condition above.
4045		 * If we get here, one of the trace_seq_* functions is not used properly.
4046 */
4047 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4048 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004049 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004050 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004051 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004052
Steven Rostedtb3806b42008-05-12 21:20:46 +02004053 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004054 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4055 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004056 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004057
4058 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004059	 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004060 * entries, go back to wait for more entries.
4061 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004062 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004063 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004064
Steven Rostedt107bad82008-05-12 21:21:01 +02004065out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004066 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004067
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004068 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004069}
4070
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004071static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
4072 struct pipe_buffer *buf)
4073{
4074 __free_page(buf->page);
4075}
4076
4077static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4078 unsigned int idx)
4079{
4080 __free_page(spd->pages[idx]);
4081}
4082
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004083static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004084 .can_merge = 0,
4085 .map = generic_pipe_buf_map,
4086 .unmap = generic_pipe_buf_unmap,
4087 .confirm = generic_pipe_buf_confirm,
4088 .release = tracing_pipe_buf_release,
4089 .steal = generic_pipe_buf_steal,
4090 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004091};
4092
Steven Rostedt34cd4992009-02-09 12:06:29 -05004093static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004094tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004095{
4096 size_t count;
4097 int ret;
4098
4099 /* Seq buffer is page-sized, exactly what we need. */
4100 for (;;) {
4101 count = iter->seq.len;
4102 ret = print_trace_line(iter);
4103 count = iter->seq.len - count;
4104 if (rem < count) {
4105 rem = 0;
4106 iter->seq.len -= count;
4107 break;
4108 }
4109 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4110 iter->seq.len -= count;
4111 break;
4112 }
4113
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004114 if (ret != TRACE_TYPE_NO_CONSUME)
4115 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004116 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004117 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004118 rem = 0;
4119 iter->ent = NULL;
4120 break;
4121 }
4122 }
4123
4124 return rem;
4125}
4126
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004127static ssize_t tracing_splice_read_pipe(struct file *filp,
4128 loff_t *ppos,
4129 struct pipe_inode_info *pipe,
4130 size_t len,
4131 unsigned int flags)
4132{
Jens Axboe35f3d142010-05-20 10:43:18 +02004133 struct page *pages_def[PIPE_DEF_BUFFERS];
4134 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004135 struct trace_iterator *iter = filp->private_data;
4136 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004137 .pages = pages_def,
4138 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004139 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004140 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004141 .flags = flags,
4142 .ops = &tracing_pipe_buf_ops,
4143 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004144 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004145 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004146 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004147 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004148 unsigned int i;
4149
Jens Axboe35f3d142010-05-20 10:43:18 +02004150 if (splice_grow_spd(pipe, &spd))
4151 return -ENOMEM;
4152
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004153 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004154 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004155 if (unlikely(iter->trace->name != tr->current_trace->name))
4156 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004157 mutex_unlock(&trace_types_lock);
4158
4159 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004160
4161 if (iter->trace->splice_read) {
4162 ret = iter->trace->splice_read(iter, filp,
4163 ppos, pipe, len, flags);
4164 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004165 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004166 }
4167
4168 ret = tracing_wait_pipe(filp);
4169 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004170 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004171
Jason Wessel955b61e2010-08-05 09:22:23 -05004172 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004173 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004174 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004175 }
4176
Lai Jiangshan4f535962009-05-18 19:35:34 +08004177 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004178 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004179
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004180 /* Fill as many pages as possible. */
Jens Axboe35f3d142010-05-20 10:43:18 +02004181 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4182 spd.pages[i] = alloc_page(GFP_KERNEL);
4183 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004184 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004185
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004186 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004187
4188 /* Copy the data into the page, so we can start over. */
4189 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004190 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004191 iter->seq.len);
4192 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004193 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004194 break;
4195 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004196 spd.partial[i].offset = 0;
4197 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004198
Steven Rostedtf9520752009-03-02 14:04:40 -05004199 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004200 }
4201
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004202 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004203 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004204 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004205
4206 spd.nr_pages = i;
4207
Jens Axboe35f3d142010-05-20 10:43:18 +02004208 ret = splice_to_pipe(pipe, &spd);
4209out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004210 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004211 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004212
Steven Rostedt34cd4992009-02-09 12:06:29 -05004213out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004214 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004215 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004216}
4217
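/*
 * A minimal userspace sketch of consuming trace_pipe through the
 * splice path above (hypothetical example, not part of this file;
 * error handling omitted):
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	int p[2];
 *
 *	pipe(p);
 *	splice(fd, NULL, p[1], NULL, 4096, 0);
 *	splice(p[0], NULL, STDOUT_FILENO, NULL, 4096, 0);
 */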
Steven Rostedta98a3c32008-05-12 21:20:59 +02004218static ssize_t
4219tracing_entries_read(struct file *filp, char __user *ubuf,
4220 size_t cnt, loff_t *ppos)
4221{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004222 struct trace_cpu *tc = filp->private_data;
4223 struct trace_array *tr = tc->tr;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004224 char buf[64];
4225 int r = 0;
4226 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004227
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004228 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004229
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004230 if (tc->cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004231 int cpu, buf_size_same;
4232 unsigned long size;
4233
4234 size = 0;
4235 buf_size_same = 1;
4236 /* check if all cpu sizes are same */
4237 for_each_tracing_cpu(cpu) {
4238 /* fill in the size from first enabled cpu */
4239 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004240 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4241 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004242 buf_size_same = 0;
4243 break;
4244 }
4245 }
4246
4247 if (buf_size_same) {
4248 if (!ring_buffer_expanded)
4249 r = sprintf(buf, "%lu (expanded: %lu)\n",
4250 size >> 10,
4251 trace_buf_size >> 10);
4252 else
4253 r = sprintf(buf, "%lu\n", size >> 10);
4254 } else
4255 r = sprintf(buf, "X\n");
4256 } else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004257 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004258
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004259 mutex_unlock(&trace_types_lock);
4260
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004261 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4262 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004263}
4264
4265static ssize_t
4266tracing_entries_write(struct file *filp, const char __user *ubuf,
4267 size_t cnt, loff_t *ppos)
4268{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004269 struct trace_cpu *tc = filp->private_data;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004270 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004271 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004272
Peter Huewe22fe9b52011-06-07 21:58:27 +02004273 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4274 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004275 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004276
4277 /* must have at least 1 entry */
4278 if (!val)
4279 return -EINVAL;
4280
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004281 /* value is in KB */
4282 val <<= 10;
4283
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004284 ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004285 if (ret < 0)
4286 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004287
Jiri Olsacf8517c2009-10-23 19:36:16 -04004288 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004289
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004290 return cnt;
4291}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004292
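/*
 * Usage sketch for the buffer_size_kb handlers above (assuming the
 * usual debugfs mount point; not part of the original file):
 *
 *	echo 2048 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * resizes every per-cpu buffer to 2 MB, while writing to a
 * per_cpu/cpuN/buffer_size_kb file resizes only that CPU's buffer.
 * Reading prints "X" when the per-cpu sizes differ.
 */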
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004293static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004294tracing_total_entries_read(struct file *filp, char __user *ubuf,
4295 size_t cnt, loff_t *ppos)
4296{
4297 struct trace_array *tr = filp->private_data;
4298 char buf[64];
4299 int r, cpu;
4300 unsigned long size = 0, expanded_size = 0;
4301
4302 mutex_lock(&trace_types_lock);
4303 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004304 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004305 if (!ring_buffer_expanded)
4306 expanded_size += trace_buf_size >> 10;
4307 }
4308 if (ring_buffer_expanded)
4309 r = sprintf(buf, "%lu\n", size);
4310 else
4311 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4312 mutex_unlock(&trace_types_lock);
4313
4314 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4315}
4316
4317static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004318tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4319 size_t cnt, loff_t *ppos)
4320{
4321 /*
4322	 * There is no need to read what the user has written; this function
4323	 * exists just so that "echo" into this file does not fail.
4324 */
4325
4326 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004327
4328 return cnt;
4329}
4330
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004331static int
4332tracing_free_buffer_release(struct inode *inode, struct file *filp)
4333{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004334 struct trace_array *tr = inode->i_private;
4335
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004336	/* disable tracing? */
4337 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4338 tracing_off();
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004339 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004340 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004341
4342 return 0;
4343}
4344
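/*
 * Usage sketch (not part of the original file): any write to the
 * free_buffer file is accepted, and closing it shrinks the ring
 * buffer to zero; if TRACE_ITER_STOP_ON_FREE is set in trace_flags,
 * tracing is stopped first:
 *
 *	echo > /sys/kernel/debug/tracing/free_buffer
 */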
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004345static ssize_t
4346tracing_mark_write(struct file *filp, const char __user *ubuf,
4347 size_t cnt, loff_t *fpos)
4348{
Steven Rostedtd696b582011-09-22 11:50:27 -04004349 unsigned long addr = (unsigned long)ubuf;
4350 struct ring_buffer_event *event;
4351 struct ring_buffer *buffer;
4352 struct print_entry *entry;
4353 unsigned long irq_flags;
4354 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004355 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004356 int nr_pages = 1;
4357 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004358 int offset;
4359 int size;
4360 int len;
4361 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004362 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004363
Steven Rostedtc76f0692008-11-07 22:36:02 -05004364 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004365 return -EINVAL;
4366
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004367 if (!(trace_flags & TRACE_ITER_MARKERS))
4368 return -EINVAL;
4369
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004370 if (cnt > TRACE_BUF_SIZE)
4371 cnt = TRACE_BUF_SIZE;
4372
Steven Rostedtd696b582011-09-22 11:50:27 -04004373 /*
4374 * Userspace is injecting traces into the kernel trace buffer.
4375	 * We want to be as non-intrusive as possible.
4376 * To do so, we do not want to allocate any special buffers
4377 * or take any locks, but instead write the userspace data
4378 * straight into the ring buffer.
4379 *
4380	 * First we need to pin the userspace buffer into memory.
4381	 * Most likely it already is, because userspace just referenced it,
4382	 * but there's no guarantee that it is. By using get_user_pages_fast()
4383 * and kmap_atomic/kunmap_atomic() we can get access to the
4384 * pages directly. We then write the data directly into the
4385 * ring buffer.
4386 */
4387 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004388
Steven Rostedtd696b582011-09-22 11:50:27 -04004389 /* check if we cross pages */
4390 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4391 nr_pages = 2;
4392
4393 offset = addr & (PAGE_SIZE - 1);
4394 addr &= PAGE_MASK;
4395
4396 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4397 if (ret < nr_pages) {
4398 while (--ret >= 0)
4399 put_page(pages[ret]);
4400 written = -EFAULT;
4401 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004402 }
4403
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004404 for (i = 0; i < nr_pages; i++)
4405 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004406
4407 local_save_flags(irq_flags);
4408 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004409 buffer = global_trace.trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004410 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4411 irq_flags, preempt_count());
4412 if (!event) {
4413 /* Ring buffer disabled, return as if not open for write */
4414 written = -EBADF;
4415 goto out_unlock;
4416 }
4417
4418 entry = ring_buffer_event_data(event);
4419 entry->ip = _THIS_IP_;
4420
4421 if (nr_pages == 2) {
4422 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004423 memcpy(&entry->buf, map_page[0] + offset, len);
4424 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004425 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004426 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004427
4428 if (entry->buf[cnt - 1] != '\n') {
4429 entry->buf[cnt] = '\n';
4430 entry->buf[cnt + 1] = '\0';
4431 } else
4432 entry->buf[cnt] = '\0';
4433
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004434 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004435
4436 written = cnt;
4437
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004438 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004439
Steven Rostedtd696b582011-09-22 11:50:27 -04004440 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004441 for (i = 0; i < nr_pages; i++){
4442 kunmap_atomic(map_page[i]);
4443 put_page(pages[i]);
4444 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004445 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004446 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004447}
4448
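/*
 * A minimal userspace sketch for the trace_marker path above
 * (hypothetical example, not part of this file):
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *
 *	write(fd, "hello from userspace", 20);
 *
 * The payload lands in the ring buffer as a TRACE_PRINT entry, with
 * a newline appended when the write did not end in one.
 */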
Li Zefan13f16d22009-12-08 11:16:11 +08004449static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004450{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004451 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004452 int i;
4453
4454 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004455 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004456 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004457 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4458 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004459 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004460
Li Zefan13f16d22009-12-08 11:16:11 +08004461 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004462}
4463
4464static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4465 size_t cnt, loff_t *fpos)
4466{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004467 struct seq_file *m = filp->private_data;
4468 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004469 char buf[64];
4470 const char *clockstr;
4471 int i;
4472
4473 if (cnt >= sizeof(buf))
4474 return -EINVAL;
4475
4476 if (copy_from_user(&buf, ubuf, cnt))
4477 return -EFAULT;
4478
4479 buf[cnt] = 0;
4480
4481 clockstr = strstrip(buf);
4482
4483 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4484 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4485 break;
4486 }
4487 if (i == ARRAY_SIZE(trace_clocks))
4488 return -EINVAL;
4489
Zhaolei5079f322009-08-25 16:12:56 +08004490 mutex_lock(&trace_types_lock);
4491
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004492 tr->clock_id = i;
4493
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004494 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004495
David Sharp60303ed2012-10-11 16:27:52 -07004496 /*
4497	 * The new clock may not be consistent with the previous clock.
4498 * Reset the buffer so that it doesn't have incomparable timestamps.
4499 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004500 tracing_reset_online_cpus(&global_trace.trace_buffer);
4501
4502#ifdef CONFIG_TRACER_MAX_TRACE
4503 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4504 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4505 tracing_reset_online_cpus(&global_trace.max_buffer);
4506#endif
David Sharp60303ed2012-10-11 16:27:52 -07004507
Zhaolei5079f322009-08-25 16:12:56 +08004508 mutex_unlock(&trace_types_lock);
4509
4510 *fpos += cnt;
4511
4512 return cnt;
4513}
4514
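/*
 * Usage sketch for the trace_clock handlers above (assuming the usual
 * debugfs mount point; not part of the original file): reading lists
 * the available clocks with the current one in brackets, and writing
 * a listed name switches clocks and resets the buffers:
 *
 *	cat /sys/kernel/debug/tracing/trace_clock
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */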
Li Zefan13f16d22009-12-08 11:16:11 +08004515static int tracing_clock_open(struct inode *inode, struct file *file)
4516{
4517 if (tracing_disabled)
4518 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004519
4520 return single_open(file, tracing_clock_show, inode->i_private);
Li Zefan13f16d22009-12-08 11:16:11 +08004521}
4522
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004523struct ftrace_buffer_info {
4524 struct trace_iterator iter;
4525 void *spare;
4526 unsigned int read;
4527};
4528
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004529#ifdef CONFIG_TRACER_SNAPSHOT
4530static int tracing_snapshot_open(struct inode *inode, struct file *file)
4531{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004532 struct trace_cpu *tc = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004533 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004534 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004535 int ret = 0;
4536
4537 if (file->f_mode & FMODE_READ) {
4538 iter = __tracing_open(inode, file, true);
4539 if (IS_ERR(iter))
4540 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004541 } else {
4542 /* Writes still need the seq_file to hold the private data */
4543 m = kzalloc(sizeof(*m), GFP_KERNEL);
4544 if (!m)
4545 return -ENOMEM;
4546 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4547 if (!iter) {
4548 kfree(m);
4549 return -ENOMEM;
4550 }
4551 iter->tr = tc->tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004552 iter->trace_buffer = &tc->tr->max_buffer;
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004553 iter->cpu_file = tc->cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004554 m->private = iter;
4555 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004556 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004557
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004558 return ret;
4559}
4560
4561static ssize_t
4562tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4563 loff_t *ppos)
4564{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004565 struct seq_file *m = filp->private_data;
4566 struct trace_iterator *iter = m->private;
4567 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004568 unsigned long val;
4569 int ret;
4570
4571 ret = tracing_update_buffers();
4572 if (ret < 0)
4573 return ret;
4574
4575 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4576 if (ret)
4577 return ret;
4578
4579 mutex_lock(&trace_types_lock);
4580
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004581 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004582 ret = -EBUSY;
4583 goto out;
4584 }
4585
4586 switch (val) {
4587 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004588 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4589 ret = -EINVAL;
4590 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004591 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004592 if (tr->allocated_snapshot)
4593 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004594 break;
4595 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004596/* Only allow per-cpu swap if the ring buffer supports it */
4597#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4598 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4599 ret = -EINVAL;
4600 break;
4601 }
4602#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004603 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004604 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004605 if (ret < 0)
4606 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004607 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004608 local_irq_disable();
4609 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004610 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004611 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004612 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004613 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004614 local_irq_enable();
4615 break;
4616 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004617 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004618 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4619 tracing_reset_online_cpus(&tr->max_buffer);
4620 else
4621 tracing_reset(&tr->max_buffer, iter->cpu_file);
4622 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004623 break;
4624 }
4625
4626 if (ret >= 0) {
4627 *ppos += cnt;
4628 ret = cnt;
4629 }
4630out:
4631 mutex_unlock(&trace_types_lock);
4632 return ret;
4633}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004634
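/*
 * Semantics of the snapshot file as implemented above, summarized as
 * a usage sketch (not part of the original file):
 *
 *	echo 0 > snapshot	frees the snapshot buffer (only valid
 *				for the whole buffer, not per cpu)
 *	echo 1 > snapshot	allocates the buffer if needed and
 *				swaps in a snapshot of the live buffer
 *	echo 2 > snapshot	(any other value) clears the snapshot
 *				contents without taking a new one
 */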
4635static int tracing_snapshot_release(struct inode *inode, struct file *file)
4636{
4637 struct seq_file *m = file->private_data;
4638
4639 if (file->f_mode & FMODE_READ)
4640 return tracing_release(inode, file);
4641
4642 /* If write only, the seq_file is just a stub */
4643 if (m)
4644 kfree(m->private);
4645 kfree(m);
4646
4647 return 0;
4648}
4649
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004650static int tracing_buffers_open(struct inode *inode, struct file *filp);
4651static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4652 size_t count, loff_t *ppos);
4653static int tracing_buffers_release(struct inode *inode, struct file *file);
4654static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4655 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4656
4657static int snapshot_raw_open(struct inode *inode, struct file *filp)
4658{
4659 struct ftrace_buffer_info *info;
4660 int ret;
4661
4662 ret = tracing_buffers_open(inode, filp);
4663 if (ret < 0)
4664 return ret;
4665
4666 info = filp->private_data;
4667
4668 if (info->iter.trace->use_max_tr) {
4669 tracing_buffers_release(inode, filp);
4670 return -EBUSY;
4671 }
4672
4673 info->iter.snapshot = true;
4674 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4675
4676 return ret;
4677}
4678
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004679#endif /* CONFIG_TRACER_SNAPSHOT */
4680
4681
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004682static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004683 .open = tracing_open_generic,
4684 .read = tracing_max_lat_read,
4685 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004686 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004687};
4688
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004689static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004690 .open = tracing_open_generic,
4691 .read = tracing_set_trace_read,
4692 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004693 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004694};
4695
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004696static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004697 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004698 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004699 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004700 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004701 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004702 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02004703};
4704
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004705static const struct file_operations tracing_entries_fops = {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004706 .open = tracing_open_generic,
Steven Rostedta98a3c32008-05-12 21:20:59 +02004707 .read = tracing_entries_read,
4708 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004709 .llseek = generic_file_llseek,
Steven Rostedta98a3c32008-05-12 21:20:59 +02004710};
4711
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004712static const struct file_operations tracing_total_entries_fops = {
4713 .open = tracing_open_generic,
4714 .read = tracing_total_entries_read,
4715 .llseek = generic_file_llseek,
4716};
4717
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004718static const struct file_operations tracing_free_buffer_fops = {
4719 .write = tracing_free_buffer_write,
4720 .release = tracing_free_buffer_release,
4721};
4722
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004723static const struct file_operations tracing_mark_fops = {
Frédéric Weisbecker43a15382008-09-21 20:16:30 +02004724 .open = tracing_open_generic,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004725 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004726 .llseek = generic_file_llseek,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004727};
4728
Zhaolei5079f322009-08-25 16:12:56 +08004729static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08004730 .open = tracing_clock_open,
4731 .read = seq_read,
4732 .llseek = seq_lseek,
4733 .release = single_release,
Zhaolei5079f322009-08-25 16:12:56 +08004734 .write = tracing_clock_write,
4735};
4736
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004737#ifdef CONFIG_TRACER_SNAPSHOT
4738static const struct file_operations snapshot_fops = {
4739 .open = tracing_snapshot_open,
4740 .read = seq_read,
4741 .write = tracing_snapshot_write,
4742 .llseek = tracing_seek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004743 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004744};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004745
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004746static const struct file_operations snapshot_raw_fops = {
4747 .open = snapshot_raw_open,
4748 .read = tracing_buffers_read,
4749 .release = tracing_buffers_release,
4750 .splice_read = tracing_buffers_splice_read,
4751 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05004752};
4753
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004754#endif /* CONFIG_TRACER_SNAPSHOT */
4755
Steven Rostedt2cadf912008-12-01 22:20:19 -05004756static int tracing_buffers_open(struct inode *inode, struct file *filp)
4757{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004758 struct trace_cpu *tc = inode->i_private;
4759 struct trace_array *tr = tc->tr;
Steven Rostedt2cadf912008-12-01 22:20:19 -05004760 struct ftrace_buffer_info *info;
4761
4762 if (tracing_disabled)
4763 return -ENODEV;
4764
4765 info = kzalloc(sizeof(*info), GFP_KERNEL);
4766 if (!info)
4767 return -ENOMEM;
4768
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004769 mutex_lock(&trace_types_lock);
4770
4771 tr->ref++;
4772
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004773 info->iter.tr = tr;
4774 info->iter.cpu_file = tc->cpu;
Steven Rostedtb6273442013-02-28 13:44:11 -05004775 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004776 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004777 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05004778 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004779 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05004780
4781 filp->private_data = info;
4782
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004783 mutex_unlock(&trace_types_lock);
4784
Lai Jiangshand1e7e022009-04-02 15:16:56 +08004785 return nonseekable_open(inode, filp);
Steven Rostedt2cadf912008-12-01 22:20:19 -05004786}
4787
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004788static unsigned int
4789tracing_buffers_poll(struct file *filp, poll_table *poll_table)
4790{
4791 struct ftrace_buffer_info *info = filp->private_data;
4792 struct trace_iterator *iter = &info->iter;
4793
4794 return trace_poll(iter, filp, poll_table);
4795}
4796
Steven Rostedt2cadf912008-12-01 22:20:19 -05004797static ssize_t
4798tracing_buffers_read(struct file *filp, char __user *ubuf,
4799 size_t count, loff_t *ppos)
4800{
4801 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004802 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05004803 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004804 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05004805
Steven Rostedt2dc5d122009-03-04 19:10:05 -05004806 if (!count)
4807 return 0;
4808
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004809 mutex_lock(&trace_types_lock);
4810
4811#ifdef CONFIG_TRACER_MAX_TRACE
4812 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
4813 size = -EBUSY;
4814 goto out_unlock;
4815 }
4816#endif
4817
Lai Jiangshanddd538f2009-04-02 15:16:59 +08004818 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004819 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
4820 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004821 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08004822 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004823 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08004824
Steven Rostedt2cadf912008-12-01 22:20:19 -05004825 /* Do we have previous read data to read? */
4826 if (info->read < PAGE_SIZE)
4827 goto read;
4828
Steven Rostedtb6273442013-02-28 13:44:11 -05004829 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004830 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004831 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05004832 &info->spare,
4833 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004834 iter->cpu_file, 0);
4835 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05004836
4837 if (ret < 0) {
4838 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004839 if ((filp->f_flags & O_NONBLOCK)) {
4840 size = -EAGAIN;
4841 goto out_unlock;
4842 }
4843 mutex_unlock(&trace_types_lock);
Steven Rostedtb6273442013-02-28 13:44:11 -05004844 iter->trace->wait_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004845 mutex_lock(&trace_types_lock);
4846 if (signal_pending(current)) {
4847 size = -EINTR;
4848 goto out_unlock;
4849 }
Steven Rostedtb6273442013-02-28 13:44:11 -05004850 goto again;
4851 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004852 size = 0;
4853 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05004854 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05004855
Steven Rostedt436fc282011-10-14 10:44:25 -04004856 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05004857 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05004858 size = PAGE_SIZE - info->read;
4859 if (size > count)
4860 size = count;
4861
4862 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004863 if (ret == size) {
4864 size = -EFAULT;
4865 goto out_unlock;
4866 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05004867 size -= ret;
4868
Steven Rostedt2cadf912008-12-01 22:20:19 -05004869 *ppos += size;
4870 info->read += size;
4871
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004872 out_unlock:
4873 mutex_unlock(&trace_types_lock);
4874
Steven Rostedt2cadf912008-12-01 22:20:19 -05004875 return size;
4876}
4877
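/*
 * Usage sketch (an assumption about the wiring, not shown in this
 * excerpt): this read path backs the per_cpu/cpuN/trace_pipe_raw
 * files, handing whole ring-buffer pages to userspace, e.g.:
 *
 *	dd if=/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw \
 *	   of=raw.dat bs=4096
 */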
4878static int tracing_buffers_release(struct inode *inode, struct file *file)
4879{
4880 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004881 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05004882
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004883 mutex_lock(&trace_types_lock);
4884
4885 WARN_ON(!iter->tr->ref);
4886 iter->tr->ref--;
Steven Rostedt2cadf912008-12-01 22:20:19 -05004887
Lai Jiangshanddd538f2009-04-02 15:16:59 +08004888 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004889 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05004890 kfree(info);
4891
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004892 mutex_unlock(&trace_types_lock);
4893
Steven Rostedt2cadf912008-12-01 22:20:19 -05004894 return 0;
4895}
4896
4897struct buffer_ref {
4898 struct ring_buffer *buffer;
4899 void *page;
4900 int ref;
4901};
4902
4903static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4904 struct pipe_buffer *buf)
4905{
4906 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4907
4908 if (--ref->ref)
4909 return;
4910
4911 ring_buffer_free_read_page(ref->buffer, ref->page);
4912 kfree(ref);
4913 buf->private = 0;
4914}
4915
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

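/*
 * Splice whole ring-buffer pages into a pipe without copying them.
 * Each spliced page carries a buffer_ref in its private field so the
 * page can be handed back to the ring buffer once the pipe side is
 * done with it.
 */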
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going
		 * to user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

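/*
 * Read handler for the per-cpu "stats" file: prints the entry,
 * overrun, byte, timestamp and event counters for one CPU's ring
 * buffer into a trace_seq and copies it out to user space.
 */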
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	struct trace_array *tr = tc->tr;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;
	int cpu = tc->cpu;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[trace_clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

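/*
 * Read handler for "dyn_ftrace_total_info": reports the dynamic
 * ftrace update count handed in as private data, followed by any
 * arch-specific details from ftrace_arch_read_dyn_info().
 */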
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
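/*
 * Function probes backing the "snapshot" command of set_ftrace_filter:
 * the first fires on every hit; the counting variant decrements its
 * count and stops triggering once it reaches zero (-1 means unlimited).
 */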
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

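/*
 * Parse "func:snapshot[:count]" from set_ftrace_filter and register
 * (or, with a leading '!', unregister) the matching probe.
 */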
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

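/*
 * Return the debugfs directory for a trace array, creating the
 * top-level "tracing" directory on first use for the global array.
 */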
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

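/*
 * Create the per_cpu/cpuN debugfs directory for one CPU, populated
 * with its trace, trace_pipe, raw, stats, buffer-size and (when
 * configured) snapshot files.
 */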
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			(void *)&data->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_stats_fops);

	trace_create_file("buffer_size_kb", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_cpu,
			(void *)&data->trace_cpu, &snapshot_fops);

	trace_create_file("snapshot_raw", 0444, d_cpu,
			(void *)&data->trace_cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	char buf[64];
	int r;

	if (buffer)
		r = ring_buffer_record_is_on(buffer);
	else
		r = 0;

	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			ring_buffer_record_on(buffer);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			ring_buffer_record_off(buffer);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
	}
}

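/*
 * Allocate one trace_buffer: the ring buffer itself plus the per-cpu
 * trace_array_cpu data that goes with it.
 */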
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	init_trace_buffers(tr, buf);

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

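/*
 * Create a new named trace instance: allocate its trace_array and
 * buffers, create its debugfs directory and event files, and add it
 * to ftrace_trace_arrays. Fails with -EEXIST if the name is taken.
 */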
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	raw_spin_lock_init(&tr->start_lock);

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	/* Holder for file callbacks */
	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	tr->trace_cpu.tr = tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret)
		goto out_free_tr;

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

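/*
 * Tear down a named trace instance, provided it exists and nothing
 * still holds a reference to it.
 */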
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	event_trace_del_tracer(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			(void *)&tr->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			(void *)&tr->trace_cpu, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			(void *)&tr->trace_cpu, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  (void *)&tr->trace_cpu, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			&global_trace, &tracing_cpumask_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			&global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			&global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to 1024 bytes; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;
}

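/*
 * Dump the ftrace ring buffer(s) to the console, typically from a
 * panic, oops or sysrq-z. Tracing is turned off first, and only one
 * dumper may run at a time.
 */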
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

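/*
 * Early boot initialization: allocate the global trace buffers and
 * cpumasks, register the nop tracer and the panic/die notifiers, and
 * apply any trace options passed on the kernel command line.
 */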
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	/* Holder for file callbacks */
	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	global_trace.trace_cpu.tr = &global_trace;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default bootup tracer lives in an init
	 * section. This function is called from lateinit. If we did
	 * not find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);