#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long		ip;
	unsigned long		parent_ip;
};

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char			type;
	char			cpu;
	char			flags;
	char			preempt_count;
	int			pid;
	cycle_t			t;
	union {
		struct ftrace_entry		fn;
		struct ctx_switch_entry		ctx;
		struct special_entry		special;
	};
};

#define TRACE_ENTRY_SIZE	sizeof(struct trace_entry)

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace, etc.):
 */
| 64 | struct trace_array_cpu { |
Steven Rostedt | 4c11d7a | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 65 | struct list_head trace_pages; |
Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 66 | atomic_t disabled; |
Steven Rostedt | b3806b4 | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 67 | spinlock_t lock; |
Ingo Molnar | d4c5a2f | 2008-05-12 21:20:46 +0200 | [diff] [blame] | 68 | struct lock_class_key lock_key; |
Ingo Molnar | 4e3c333 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 69 | |
Ingo Molnar | c7aafc5 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 70 | /* these fields get copied into max-trace: */ |
Steven Rostedt | 93a588f | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 71 | unsigned trace_head_idx; |
| 72 | unsigned trace_tail_idx; |
| 73 | void *trace_head; /* producer */ |
| 74 | void *trace_tail; /* consumer */ |
Ingo Molnar | c7aafc5 | 2008-05-12 21:20:45 +0200 | [diff] [blame] | 75 | unsigned long trace_idx; |
Steven Rostedt | bc0c38d | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 76 | unsigned long saved_latency; |
| 77 | unsigned long critical_start; |
| 78 | unsigned long critical_end; |
| 79 | unsigned long critical_sequence; |
| 80 | unsigned long nice; |
| 81 | unsigned long policy; |
| 82 | unsigned long rt_priority; |
| 83 | cycle_t preempt_timestamp; |
| 84 | pid_t pid; |
| 85 | uid_t uid; |
| 86 | char comm[TASK_COMM_LEN]; |
| 87 | }; |
| 88 | |
| 89 | struct trace_iterator; |
| 90 | |
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long		entries;
	long			ctrl;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*start)(struct trace_iterator *iter);
	void			(*stop)(struct trace_iterator *iter);
	void			(*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	struct tracer		*next;
	int			print_max;
};

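/*
 * Trace sequence - a page-sized buffer that the output routines use to
 * build up a formatted line of trace output before copying it out:
 */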
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_seq	seq;
	struct trace_array	*tr;
	struct tracer		*trace;

	struct trace_entry	*ent;
	int			cpu;

	struct trace_entry	*prev_ent;
	int			prev_cpu;

	unsigned long		iter_flags;
	loff_t			pos;
	unsigned long		next_idx[NR_CPUS];
	struct list_head	*next_page[NR_CPUS];
	unsigned		next_page_idx[NR_CPUS];
	long			idx;
};

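/*
 * Core tracing entry points - buffer reset, debugfs helpers and the
 * routines tracers call to record events:
 */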
void notrace tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags);

void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
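
/*
 * Example (sketch only; my_tracer_init/my_tracer_reset are hypothetical
 * callbacks): a tracer plugin fills in a struct tracer and registers
 * itself at boot:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(init_my_tracer);
 */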

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

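/*
 * Highest latency recorded so far, and the threshold above which the
 * latency tracers record a trace:
 */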
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

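/*
 * Save the current trace into the max-trace when a new maximum latency
 * is hit (update_max_tr_single only snapshots a single CPU):
 */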
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

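/* Current timestamp of the trace clock for the given CPU: */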
extern notrace cycle_t ftrace_now(int cpu);

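/* Context-switch hook used by the wakeup latency tracer: */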
#ifdef CONFIG_SCHED_TRACER
extern void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
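/*
 * Callbacks invoked on every context switch; tracers register them
 * with register_tracer_switch():
 */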
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t	func;
	void			*private;
	struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
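/* Total number of functions recorded by dynamic ftrace so far: */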
extern unsigned long ftrace_update_tot_cnt;
#endif

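/*
 * Startup selftests - each tracer can provide one; it runs when the
 * tracer is registered:
 */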
#ifdef CONFIG_FTRACE_STARTUP_TEST
#ifdef CONFIG_FTRACE
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
#endif
#ifdef CONFIG_IRQSOFF_TRACER
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
#endif
#ifdef CONFIG_PREEMPT_TRACER
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
#endif
#ifdef CONFIG_SCHED_TRACER
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
#endif
#endif /* CONFIG_FTRACE_STARTUP_TEST */

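/* Address of the first data page of a CPU trace buffer: */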
extern void *head_page(struct trace_array_cpu *data);

#endif /* _LINUX_KERNEL_TRACE_H */