blob: 2cb12fd98f6b395dbdc1083d01a96b527633f858 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001#ifndef _LINUX_KERNEL_TRACE_H
2#define _LINUX_KERNEL_TRACE_H
3
4#include <linux/fs.h>
5#include <asm/atomic.h>
6#include <linux/sched.h>
7#include <linux/clocksource.h>
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008#include <linux/ring_buffer.h>
Pekka Paalanenbd8ac682008-05-12 21:20:57 +02009#include <linux/mmiotrace.h>
Frédéric Weisbeckerd13744c2008-09-23 11:32:08 +010010#include <linux/ftrace.h>
Frederic Weisbecker3f5ec132008-11-11 23:21:31 +010011#include <trace/boot.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020012
/*
 * Identifiers for every kind of record that can be stored in the
 * ring buffer.  __TRACE_FIRST_TYPE/__TRACE_LAST_TYPE delimit the
 * valid range; entries are matched against these ids by
 * trace_assign_type() below.
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,		/* function entry (struct ftrace_entry) */
	TRACE_CTX,		/* context switch (struct ctx_switch_entry) */
	TRACE_WAKE,		/* wakeup event (also ctx_switch_entry) */
	TRACE_CONT,		/* continuation of a previous entry */
	TRACE_STACK,		/* stack trace (struct stack_entry) */
	TRACE_PRINT,		/* ftrace_printk output (struct print_entry) */
	TRACE_SPECIAL,		/* free-form args (struct special_entry) */
	TRACE_MMIO_RW,		/* mmiotrace read/write */
	TRACE_MMIO_MAP,		/* mmiotrace mapping event */
	TRACE_BRANCH,		/* branch profiler (struct trace_branch) */
	TRACE_BOOT_CALL,	/* boot-time initcall entry */
	TRACE_BOOT_RET,		/* boot-time initcall return */
	TRACE_FN_RET,		/* function return (struct ftrace_ret_entry) */

	__TRACE_LAST_TYPE
};
32
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 *
 * Every specific entry type embeds this as its first member ("ent"),
 * which is what lets trace_assign_type() downcast safely.
 */
struct trace_entry {
	unsigned char type;		/* one of enum trace_type */
	unsigned char cpu;		/* CPU the event was recorded on */
	unsigned char flags;		/* enum trace_flag_type bits */
	unsigned char preempt_count;	/* preempt depth when recorded */
	int pid;			/* pid of the task that traced */
};
46
47/*
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020048 * Function trace entry - function address and parent function addres:
49 */
50struct ftrace_entry {
Steven Rostedt777e2082008-09-29 23:02:42 -040051 struct trace_entry ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020052 unsigned long ip;
53 unsigned long parent_ip;
54};
/* Function return entry - recorded when a traced function returns. */
struct ftrace_ret_entry {
	struct trace_entry ent;
	unsigned long ip;		/* address of the returning function */
	unsigned long parent_ip;	/* address of its caller */
	unsigned long long calltime;	/* timestamp taken at function entry */
	unsigned long long rettime;	/* timestamp taken at function return */
	/* NOTE(review): presumably a lost-events counter for the return
	 * stack - confirm against the return tracer implementation. */
	unsigned long overrun;
};
extern struct tracer boot_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020066
67/*
68 * Context switch trace entry - which task (and prio) we switched from/to:
69 */
70struct ctx_switch_entry {
Steven Rostedt777e2082008-09-29 23:02:42 -040071 struct trace_entry ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020072 unsigned int prev_pid;
73 unsigned char prev_prio;
74 unsigned char prev_state;
75 unsigned int next_pid;
76 unsigned char next_prio;
Peter Zijlstrabac524d2008-05-12 21:20:53 +020077 unsigned char next_state;
Peter Zijlstra80b5e942008-09-04 10:24:16 +020078 unsigned int next_cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020079};
80
81/*
Ingo Molnarf0a920d2008-05-12 21:20:47 +020082 * Special (free-form) trace entry:
83 */
84struct special_entry {
Steven Rostedt777e2082008-09-29 23:02:42 -040085 struct trace_entry ent;
Ingo Molnarf0a920d2008-05-12 21:20:47 +020086 unsigned long arg1;
87 unsigned long arg2;
88 unsigned long arg3;
89};
90
91/*
Ingo Molnar86387f72008-05-12 21:20:51 +020092 * Stack-trace entry:
93 */
94
Ingo Molnar74f4e362008-05-12 21:21:15 +020095#define FTRACE_STACK_ENTRIES 8
Ingo Molnar86387f72008-05-12 21:20:51 +020096
97struct stack_entry {
Steven Rostedt777e2082008-09-29 23:02:42 -040098 struct trace_entry ent;
Ingo Molnar86387f72008-05-12 21:20:51 +020099 unsigned long caller[FTRACE_STACK_ENTRIES];
100};
101
102/*
Steven Rostedtdd0e5452008-08-01 12:26:41 -0400103 * ftrace_printk entry:
104 */
105struct print_entry {
Steven Rostedt777e2082008-09-29 23:02:42 -0400106 struct trace_entry ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -0400107 unsigned long ip;
108 char buf[];
109};
110
/* size of the legacy fixed-width trace record, kept for continuations */
#define TRACE_OLD_SIZE		88

/*
 * Continuation record (TRACE_CONT): carries the overflow text of an
 * entry that did not fit in a single record.
 */
struct trace_field_cont {
	unsigned char type;
	/* Temporary till we get rid of this completely */
	char buf[TRACE_OLD_SIZE - 1];
};
118
/* mmiotrace read/write event wrapped as a trace entry */
struct trace_mmiotrace_rw {
	struct trace_entry ent;
	struct mmiotrace_rw rw;
};

/* mmiotrace ioremap/iounmap event wrapped as a trace entry */
struct trace_mmiotrace_map {
	struct trace_entry ent;
	struct mmiotrace_map map;
};
128
/* boot tracer: initcall invocation */
struct trace_boot_call {
	struct trace_entry ent;
	struct boot_trace_call boot_call;
};

/* boot tracer: initcall completion */
struct trace_boot_ret {
	struct trace_entry ent;
	struct boot_trace_ret boot_ret;
};
138
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
/*
 * Branch profiler entry: records one likely()/unlikely() evaluation
 * and whether the annotation predicted correctly.
 */
struct trace_branch {
	struct trace_entry ent;
	unsigned line;			/* source line of the branch */
	char func[TRACE_FUNC_SIZE+1];	/* enclosing function name */
	char file[TRACE_FILE_SIZE+1];	/* source file name */
	char correct;			/* non-zero if prediction was right */
};
148
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	- interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ	- inside an interrupt handler
 *  SOFTIRQ	- inside a softirq handler
 *  CONT	- multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
167
/* scratch buffer size used when formatting trace output */
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t disabled;	/* non-zero: tracing suppressed on this CPU */

	/* these fields get copied into max-trace: */
	unsigned long trace_idx;
	unsigned long overrun;		/* entries lost to buffer overflow */
	unsigned long saved_latency;
	unsigned long critical_start;	/* latency-critical section start */
	unsigned long critical_end;	/* latency-critical section end */
	unsigned long critical_sequence;
	unsigned long nice;		/* snapshot of the traced task */
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];	/* command name of the traced task */
};
193
struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer *buffer;	/* backing ring buffer */
	unsigned long entries;		/* buffer size (entries per CPU) */
	int cpu;
	cycle_t time_start;		/* timestamp when tracing started */
	struct task_struct *waiter;	/* reader blocked in pipe read */
	struct trace_array_cpu *data[NR_CPUS];
};
209
/* True when @var is declared as a pointer to the given @type. */
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
/*
 * One arm of trace_assign_type(): if @var's static type matches
 * @etype, assign @entry to it, warn when the runtime type id
 * disagrees (@id == 0 skips the check), and break out of the
 * enclosing do/while.
 */
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
		__ftrace_bad_type();					\
	} while (0)
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +0200255
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,	/* line fully emitted */
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};
262
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char *name;	/* Will appear on the trace_options file */
	u32 bit;		/* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * have to set the initial value of the flags val.
 */
struct tracer_flags {
	u32 val;		/* current option bits */
	struct tracer_opt *opts;	/* NULL-name-terminated option table */
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
285
/*
 * A specific tracer, represented by methods that operate on a trace array:
 * Registered via register_tracer(); all callbacks except init/name are
 * optional.
 */
struct tracer {
	const char *name;
	/* Your tracer should raise a warning if init fails */
	int (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*start)(struct trace_array *tr);
	void (*stop)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);	/* trace_pipe opened */
	void (*close)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	enum print_line_t (*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int (*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer *next;		/* registration list link */
	int print_max;			/* print the max-latency snapshot */
	struct tracer_flags *flags;	/* tracer-specific options */
};
313
/* One page of formatted output, filled before being copied to a reader. */
struct trace_seq {
	unsigned char buffer[PAGE_SIZE];
	unsigned int len;	/* bytes written so far */
	unsigned int readpos;	/* bytes already consumed by the reader */
};
319
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array *tr;		/* array being iterated */
	struct tracer *trace;		/* tracer active on this array */
	void *private;			/* tracer-private state */
	struct ring_buffer_iter *buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq seq;		/* formatting scratch buffer */
	struct trace_entry *ent;	/* current entry */
	int cpu;			/* CPU the current entry came from */
	u64 ts;				/* timestamp of the current entry */

	unsigned long iter_flags;
	loff_t pos;
	long idx;

	cpumask_t started;		/* CPUs we have already output from */
};
342
/* Core tracing control and entry-recording API (defined in trace.c). */
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
/* Fill the common trace_entry header fields (flags, pc, pid, ...). */
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);

/* cmdline and sched-switch recording on/off switches */
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

/* max-latency tracking state shared by the latency tracers */
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
/* no function tracer configured: compile to nothing */
# define tracing_start_function_trace()	do { } while (0)
# define tracing_stop_function_trace()	do { } while (0)
#endif
412
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
/* Callback invoked on every context switch probe hit. */
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

/* Linked list node registering one switch callback + its cookie. */
struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
427
#ifdef CONFIG_DYNAMIC_FTRACE
/* total number of mcount call sites patched so far */
extern unsigned long ftrace_update_tot_cnt;
/* function the dynamic-ftrace selftest patches and calls */
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif
433
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Per-tracer startup selftests, run at registration time.
 * Each returns 0 on success, non-zero on failure.
 */
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
454
/* Output formatting helpers (trace.c / trace_output). */
extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

/* global output-option bits; see enum trace_iterator_flags */
extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t print_return_function(struct trace_iterator *iter);
#else
/* return tracer not built: let other print handlers take the entry */
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif
480
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
	TRACE_ITER_RAW		= 0x10,
	TRACE_ITER_HEX		= 0x20,
	TRACE_ITER_BIN		= 0x40,
	TRACE_ITER_BLOCK	= 0x80,
	TRACE_ITER_STACKTRACE	= 0x100,
	TRACE_ITER_SCHED_TREE	= 0x200,
	TRACE_ITER_PRINTK	= 0x400,
	TRACE_ITER_PREEMPTONLY	= 0x800,
	TRACE_ITER_BRANCH	= 0x1000,
	TRACE_ITER_ANNOTATE	= 0x2000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* do-nothing tracer, used as the default and by the nop selftest */
extern struct tracer nop_trace;
513
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 *
 * Returns the need_resched state to hand back to
 * ftrace_preempt_enable().
 */
static inline int ftrace_preempt_disable(void)
{
	/* sample need_resched() *before* preemption goes off */
	int need = need_resched();

	preempt_disable_notrace();
	return need;
}
542
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable saved the state of preemption.
 * If @resched is set, then we were either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (!resched) {
		preempt_enable_notrace();
		return;
	}
	/* resched pending: re-enable without triggering a schedule */
	preempt_enable_no_resched_notrace();
}
561
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
/* Turn branch tracing on for @tr if the user enabled the option. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
/* branch tracer not configured: stubs */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200586#endif /* _LINUX_KERNEL_TRACE_H */