blob: 157d4f68b0e07335e048840eecd302db797c53c8 [file] [log] [blame]
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
Steven Rostedt3d083392008-05-12 21:20:42 +020016#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020019#include <linux/seq_file.h>
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -080020#include <linux/suspend.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020021#include <linux/debugfs.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020022#include <linux/hardirq.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010023#include <linux/kthread.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020024#include <linux/uaccess.h>
Abhishek Sagarf22f9a82008-06-21 23:50:29 +053025#include <linux/kprobes.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010026#include <linux/ftrace.h>
Steven Rostedtb0fc4942008-05-12 21:20:43 +020027#include <linux/sysctl.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020028#include <linux/ctype.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020029#include <linux/list.h>
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020030
Abhishek Sagar395a59d2008-06-21 23:47:27 +053031#include <asm/ftrace.h>
32
Steven Rostedt3d083392008-05-12 21:20:42 +020033#include "trace.h"
34
Steven Rostedt69128962008-10-23 09:33:03 -040035#define FTRACE_WARN_ON(cond) \
36 do { \
37 if (WARN_ON(cond)) \
38 ftrace_kill(); \
39 } while (0)
40
41#define FTRACE_WARN_ON_ONCE(cond) \
42 do { \
43 if (WARN_ON_ONCE(cond)) \
44 ftrace_kill(); \
45 } while (0)
46
Steven Rostedt4eebcc82008-05-12 21:20:48 +020047/* ftrace_enabled is a method to turn ftrace on or off */
48int ftrace_enabled __read_mostly;
Steven Rostedtd61f82d2008-05-12 21:20:43 +020049static int last_ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +020050
Steven Rostedt0ef8cde2008-12-03 15:36:58 -050051/* set when tracing only a pid */
Steven Rostedt978f3a42008-12-04 00:26:40 -050052struct pid *ftrace_pid_trace;
Steven Rostedt21bbecd2008-12-04 23:30:56 -050053static struct pid * const ftrace_swapper_pid = &init_struct_pid;
Steven Rostedtdf4fc312008-11-26 00:16:23 -050054
Steven Rostedt60a7ecf2008-11-05 16:05:44 -050055/* Quick disabling of function tracer. */
56int function_trace_stop;
57
Steven Rostedt4eebcc82008-05-12 21:20:48 +020058/*
59 * ftrace_disabled is set when an anomaly is discovered.
60 * ftrace_disabled is much stronger than ftrace_enabled.
61 */
62static int ftrace_disabled __read_mostly;
63
Steven Rostedt52baf112009-02-14 01:15:39 -050064static DEFINE_MUTEX(ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +020065
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020066static struct ftrace_ops ftrace_list_end __read_mostly =
67{
68 .func = ftrace_stub,
69};
70
71static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
72ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -050073ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
Steven Rostedtdf4fc312008-11-26 00:16:23 -050074ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020075
Ingo Molnarf2252932008-05-22 10:37:48 +020076static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020077{
78 struct ftrace_ops *op = ftrace_list;
79
80 /* in case someone actually ports this to alpha! */
81 read_barrier_depends();
82
83 while (op != &ftrace_list_end) {
84 /* silly alpha */
85 read_barrier_depends();
86 op->func(ip, parent_ip);
87 op = op->next;
88 };
89}
90
Steven Rostedtdf4fc312008-11-26 00:16:23 -050091static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
92{
Steven Rostedt0ef8cde2008-12-03 15:36:58 -050093 if (!test_tsk_trace_trace(current))
Steven Rostedtdf4fc312008-11-26 00:16:23 -050094 return;
95
96 ftrace_pid_function(ip, parent_ip);
97}
98
99static void set_ftrace_pid_function(ftrace_func_t func)
100{
101 /* do not set ftrace_pid_function to itself! */
102 if (func != ftrace_pid_func)
103 ftrace_pid_function = func;
104}
105
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200106/**
Steven Rostedt3d083392008-05-12 21:20:42 +0200107 * clear_ftrace_function - reset the ftrace function
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200108 *
Steven Rostedt3d083392008-05-12 21:20:42 +0200109 * This NULLs the ftrace function and in essence stops
110 * tracing. There may be lag
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200111 */
Steven Rostedt3d083392008-05-12 21:20:42 +0200112void clear_ftrace_function(void)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200113{
Steven Rostedt3d083392008-05-12 21:20:42 +0200114 ftrace_trace_function = ftrace_stub;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500115 __ftrace_trace_function = ftrace_stub;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500116 ftrace_pid_function = ftrace_stub;
Steven Rostedt3d083392008-05-12 21:20:42 +0200117}
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200118
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500119#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
120/*
121 * For those archs that do not test ftrace_trace_stop in their
122 * mcount call site, we need to do it from C.
123 */
124static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
125{
126 if (function_trace_stop)
127 return;
128
129 __ftrace_trace_function(ip, parent_ip);
130}
131#endif
132
Ingo Molnare309b412008-05-12 21:20:51 +0200133static int __register_ftrace_function(struct ftrace_ops *ops)
Steven Rostedt3d083392008-05-12 21:20:42 +0200134{
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200135 ops->next = ftrace_list;
136 /*
137 * We are entering ops into the ftrace_list but another
138 * CPU might be walking that list. We need to make sure
139 * the ops->next pointer is valid before another CPU sees
140 * the ops pointer included into the ftrace_list.
141 */
142 smp_wmb();
143 ftrace_list = ops;
Steven Rostedt3d083392008-05-12 21:20:42 +0200144
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200145 if (ftrace_enabled) {
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500146 ftrace_func_t func;
147
148 if (ops->next == &ftrace_list_end)
149 func = ops->func;
150 else
151 func = ftrace_list_func;
152
Steven Rostedt978f3a42008-12-04 00:26:40 -0500153 if (ftrace_pid_trace) {
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500154 set_ftrace_pid_function(func);
155 func = ftrace_pid_func;
156 }
157
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200158 /*
159 * For one func, simply call it directly.
160 * For more than one func, call the chain.
161 */
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500162#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500163 ftrace_trace_function = func;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500164#else
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500165 __ftrace_trace_function = func;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500166 ftrace_trace_function = ftrace_test_stop_func;
167#endif
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200168 }
Steven Rostedt3d083392008-05-12 21:20:42 +0200169
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200170 return 0;
171}
172
Ingo Molnare309b412008-05-12 21:20:51 +0200173static int __unregister_ftrace_function(struct ftrace_ops *ops)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200174{
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200175 struct ftrace_ops **p;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200176
177 /*
Steven Rostedt3d083392008-05-12 21:20:42 +0200178 * If we are removing the last function, then simply point
179 * to the ftrace_stub.
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200180 */
181 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
182 ftrace_trace_function = ftrace_stub;
183 ftrace_list = &ftrace_list_end;
Steven Rostedte6ea44e2009-02-14 01:42:44 -0500184 return 0;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200185 }
186
187 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
188 if (*p == ops)
189 break;
190
Steven Rostedte6ea44e2009-02-14 01:42:44 -0500191 if (*p != ops)
192 return -1;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200193
194 *p = (*p)->next;
195
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200196 if (ftrace_enabled) {
197 /* If we only have one func left, then call that directly */
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500198 if (ftrace_list->next == &ftrace_list_end) {
199 ftrace_func_t func = ftrace_list->func;
200
Steven Rostedt978f3a42008-12-04 00:26:40 -0500201 if (ftrace_pid_trace) {
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500202 set_ftrace_pid_function(func);
203 func = ftrace_pid_func;
204 }
205#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
206 ftrace_trace_function = func;
207#else
208 __ftrace_trace_function = func;
209#endif
210 }
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200211 }
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200212
Steven Rostedte6ea44e2009-02-14 01:42:44 -0500213 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +0200214}
215
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500216static void ftrace_update_pid_func(void)
217{
218 ftrace_func_t func;
219
Steven Rostedt52baf112009-02-14 01:15:39 -0500220 mutex_lock(&ftrace_lock);
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500221
222 if (ftrace_trace_function == ftrace_stub)
223 goto out;
224
225 func = ftrace_trace_function;
226
Steven Rostedt978f3a42008-12-04 00:26:40 -0500227 if (ftrace_pid_trace) {
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500228 set_ftrace_pid_function(func);
229 func = ftrace_pid_func;
230 } else {
Liming Wang66eafeb2008-12-02 10:33:08 +0800231 if (func == ftrace_pid_func)
232 func = ftrace_pid_function;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500233 }
234
235#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
236 ftrace_trace_function = func;
237#else
238 __ftrace_trace_function = func;
239#endif
240
241 out:
Steven Rostedt52baf112009-02-14 01:15:39 -0500242 mutex_unlock(&ftrace_lock);
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500243}
244
Steven Rostedt3d083392008-05-12 21:20:42 +0200245#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt99ecdc42008-08-15 21:40:05 -0400246#ifndef CONFIG_FTRACE_MCOUNT_RECORD
Steven Rostedtcb7be3b2008-10-23 09:33:05 -0400247# error Dynamic ftrace depends on MCOUNT_RECORD
Steven Rostedt99ecdc42008-08-15 21:40:05 -0400248#endif
249
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200250enum {
251 FTRACE_ENABLE_CALLS = (1 << 0),
252 FTRACE_DISABLE_CALLS = (1 << 1),
253 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
254 FTRACE_ENABLE_MCOUNT = (1 << 3),
255 FTRACE_DISABLE_MCOUNT = (1 << 4),
Steven Rostedt5a45cfe2008-11-26 00:16:24 -0500256 FTRACE_START_FUNC_RET = (1 << 5),
257 FTRACE_STOP_FUNC_RET = (1 << 6),
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200258};
259
Steven Rostedt5072c592008-05-12 21:20:43 +0200260static int ftrace_filtered;
261
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400262static LIST_HEAD(ftrace_new_addrs);
Steven Rostedt3d083392008-05-12 21:20:42 +0200263
Steven Rostedt41c52c02008-05-22 11:46:33 -0400264static DEFINE_MUTEX(ftrace_regex_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +0200265
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200266struct ftrace_page {
267 struct ftrace_page *next;
Steven Rostedt431aa3f2009-01-06 12:43:01 -0500268 int index;
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200269 struct dyn_ftrace records[];
David Milleraa5e5ce2008-05-13 22:06:56 -0700270};
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200271
272#define ENTRIES_PER_PAGE \
273 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
274
275/* estimate from running different kernels */
276#define NR_TO_INIT 10000
277
278static struct ftrace_page *ftrace_pages_start;
279static struct ftrace_page *ftrace_pages;
280
Steven Rostedt37ad5082008-05-12 21:20:48 +0200281static struct dyn_ftrace *ftrace_free_records;
282
Steven Rostedt265c8312009-02-13 12:43:56 -0500283/*
284 * This is a double for. Do not use 'break' to break out of the loop,
285 * you must use a goto.
286 */
287#define do_for_each_ftrace_rec(pg, rec) \
288 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
289 int _____i; \
290 for (_____i = 0; _____i < pg->index; _____i++) { \
291 rec = &pg->records[_____i];
292
293#define while_for_each_ftrace_rec() \
294 } \
295 }
Abhishek Sagarecea6562008-06-21 23:47:53 +0530296
297#ifdef CONFIG_KPROBES
Ingo Molnarf17845e2008-10-24 12:47:10 +0200298
299static int frozen_record_count;
300
Abhishek Sagarecea6562008-06-21 23:47:53 +0530301static inline void freeze_record(struct dyn_ftrace *rec)
302{
303 if (!(rec->flags & FTRACE_FL_FROZEN)) {
304 rec->flags |= FTRACE_FL_FROZEN;
305 frozen_record_count++;
306 }
307}
308
309static inline void unfreeze_record(struct dyn_ftrace *rec)
310{
311 if (rec->flags & FTRACE_FL_FROZEN) {
312 rec->flags &= ~FTRACE_FL_FROZEN;
313 frozen_record_count--;
314 }
315}
316
317static inline int record_frozen(struct dyn_ftrace *rec)
318{
319 return rec->flags & FTRACE_FL_FROZEN;
320}
321#else
322# define freeze_record(rec) ({ 0; })
323# define unfreeze_record(rec) ({ 0; })
324# define record_frozen(rec) ({ 0; })
325#endif /* CONFIG_KPROBES */
326
Ingo Molnare309b412008-05-12 21:20:51 +0200327static void ftrace_free_rec(struct dyn_ftrace *rec)
Steven Rostedt37ad5082008-05-12 21:20:48 +0200328{
Steven Rostedt37ad5082008-05-12 21:20:48 +0200329 rec->ip = (unsigned long)ftrace_free_records;
330 ftrace_free_records = rec;
331 rec->flags |= FTRACE_FL_FREE;
332}
333
Steven Rostedtfed19392008-08-14 22:47:19 -0400334void ftrace_release(void *start, unsigned long size)
335{
336 struct dyn_ftrace *rec;
337 struct ftrace_page *pg;
338 unsigned long s = (unsigned long)start;
339 unsigned long e = s + size;
Steven Rostedtfed19392008-08-14 22:47:19 -0400340
Steven Rostedt00fd61a2008-08-15 21:40:04 -0400341 if (ftrace_disabled || !start)
Steven Rostedtfed19392008-08-14 22:47:19 -0400342 return;
343
Steven Rostedt52baf112009-02-14 01:15:39 -0500344 mutex_lock(&ftrace_lock);
Steven Rostedt265c8312009-02-13 12:43:56 -0500345 do_for_each_ftrace_rec(pg, rec) {
346 if ((rec->ip >= s) && (rec->ip < e))
347 ftrace_free_rec(rec);
348 } while_for_each_ftrace_rec();
Steven Rostedt52baf112009-02-14 01:15:39 -0500349 mutex_unlock(&ftrace_lock);
Steven Rostedtfed19392008-08-14 22:47:19 -0400350}
351
Ingo Molnare309b412008-05-12 21:20:51 +0200352static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200353{
Steven Rostedt37ad5082008-05-12 21:20:48 +0200354 struct dyn_ftrace *rec;
355
356 /* First check for freed records */
357 if (ftrace_free_records) {
358 rec = ftrace_free_records;
359
Steven Rostedt37ad5082008-05-12 21:20:48 +0200360 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
Steven Rostedt69128962008-10-23 09:33:03 -0400361 FTRACE_WARN_ON_ONCE(1);
Steven Rostedt37ad5082008-05-12 21:20:48 +0200362 ftrace_free_records = NULL;
363 return NULL;
364 }
365
366 ftrace_free_records = (void *)rec->ip;
367 memset(rec, 0, sizeof(*rec));
368 return rec;
369 }
370
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200371 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400372 if (!ftrace_pages->next) {
373 /* allocate another page */
374 ftrace_pages->next =
375 (void *)get_zeroed_page(GFP_KERNEL);
376 if (!ftrace_pages->next)
377 return NULL;
378 }
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200379 ftrace_pages = ftrace_pages->next;
380 }
381
382 return &ftrace_pages->records[ftrace_pages->index++];
383}
384
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400385static struct dyn_ftrace *
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200386ftrace_record_ip(unsigned long ip)
Steven Rostedt3d083392008-05-12 21:20:42 +0200387{
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400388 struct dyn_ftrace *rec;
Steven Rostedt3d083392008-05-12 21:20:42 +0200389
Steven Rostedtf3c7ac42008-11-14 16:21:19 -0800390 if (ftrace_disabled)
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400391 return NULL;
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200392
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400393 rec = ftrace_alloc_dyn_node(ip);
394 if (!rec)
395 return NULL;
Steven Rostedt3d083392008-05-12 21:20:42 +0200396
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400397 rec->ip = ip;
Steven Rostedt3d083392008-05-12 21:20:42 +0200398
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400399 list_add(&rec->list, &ftrace_new_addrs);
Steven Rostedt3d083392008-05-12 21:20:42 +0200400
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400401 return rec;
Steven Rostedt3d083392008-05-12 21:20:42 +0200402}
403
Steven Rostedt05736a42008-09-22 14:55:47 -0700404static void print_ip_ins(const char *fmt, unsigned char *p)
405{
406 int i;
407
408 printk(KERN_CONT "%s", fmt);
409
410 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
411 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
412}
413
Steven Rostedt31e88902008-11-14 16:21:19 -0800414static void ftrace_bug(int failed, unsigned long ip)
Steven Rostedtb17e8a32008-11-14 16:21:19 -0800415{
416 switch (failed) {
417 case -EFAULT:
418 FTRACE_WARN_ON_ONCE(1);
419 pr_info("ftrace faulted on modifying ");
420 print_ip_sym(ip);
421 break;
422 case -EINVAL:
423 FTRACE_WARN_ON_ONCE(1);
424 pr_info("ftrace failed to modify ");
425 print_ip_sym(ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -0800426 print_ip_ins(" actual: ", (unsigned char *)ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -0800427 printk(KERN_CONT "\n");
428 break;
429 case -EPERM:
430 FTRACE_WARN_ON_ONCE(1);
431 pr_info("ftrace faulted on writing ");
432 print_ip_sym(ip);
433 break;
434 default:
435 FTRACE_WARN_ON_ONCE(1);
436 pr_info("ftrace faulted on unknown error ");
437 print_ip_sym(ip);
438 }
439}
440
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200441
Abhishek Sagar492a7ea2008-05-25 00:10:04 +0530442static int
Steven Rostedt31e88902008-11-14 16:21:19 -0800443__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200444{
Ingo Molnare309b412008-05-12 21:20:51 +0200445 unsigned long ip, fl;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +0100446 unsigned long ftrace_addr;
447
Shaohua Lif0001202009-01-09 11:29:42 +0800448 ftrace_addr = (unsigned long)FTRACE_ADDR;
Steven Rostedt5072c592008-05-12 21:20:43 +0200449
450 ip = rec->ip;
451
Steven Rostedt982c3502008-11-15 16:31:41 -0500452 /*
453 * If this record is not to be traced and
454 * it is not enabled then do nothing.
455 *
456 * If this record is not to be traced and
Wenji Huang57794a92009-02-06 17:33:27 +0800457 * it is enabled then disable it.
Steven Rostedt982c3502008-11-15 16:31:41 -0500458 *
459 */
460 if (rec->flags & FTRACE_FL_NOTRACE) {
461 if (rec->flags & FTRACE_FL_ENABLED)
462 rec->flags &= ~FTRACE_FL_ENABLED;
463 else
Steven Rostedt5072c592008-05-12 21:20:43 +0200464 return 0;
465
Steven Rostedt982c3502008-11-15 16:31:41 -0500466 } else if (ftrace_filtered && enable) {
Steven Rostedt5072c592008-05-12 21:20:43 +0200467 /*
Steven Rostedt982c3502008-11-15 16:31:41 -0500468 * Filtering is on:
Steven Rostedt5072c592008-05-12 21:20:43 +0200469 */
Steven Rostedt982c3502008-11-15 16:31:41 -0500470
471 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
472
473 /* Record is filtered and enabled, do nothing */
474 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
475 return 0;
476
Wenji Huang57794a92009-02-06 17:33:27 +0800477 /* Record is not filtered or enabled, do nothing */
Steven Rostedt982c3502008-11-15 16:31:41 -0500478 if (!fl)
479 return 0;
480
481 /* Record is not filtered but enabled, disable it */
482 if (fl == FTRACE_FL_ENABLED)
Steven Rostedt5072c592008-05-12 21:20:43 +0200483 rec->flags &= ~FTRACE_FL_ENABLED;
Steven Rostedt982c3502008-11-15 16:31:41 -0500484 else
485 /* Otherwise record is filtered but not enabled, enable it */
Steven Rostedt5072c592008-05-12 21:20:43 +0200486 rec->flags |= FTRACE_FL_ENABLED;
Steven Rostedt5072c592008-05-12 21:20:43 +0200487 } else {
Steven Rostedt982c3502008-11-15 16:31:41 -0500488 /* Disable or not filtered */
Steven Rostedt5072c592008-05-12 21:20:43 +0200489
490 if (enable) {
Steven Rostedt982c3502008-11-15 16:31:41 -0500491 /* if record is enabled, do nothing */
Steven Rostedt41c52c02008-05-22 11:46:33 -0400492 if (rec->flags & FTRACE_FL_ENABLED)
Steven Rostedt5072c592008-05-12 21:20:43 +0200493 return 0;
Steven Rostedt982c3502008-11-15 16:31:41 -0500494
Steven Rostedt41c52c02008-05-22 11:46:33 -0400495 rec->flags |= FTRACE_FL_ENABLED;
Steven Rostedt982c3502008-11-15 16:31:41 -0500496
Steven Rostedt5072c592008-05-12 21:20:43 +0200497 } else {
Steven Rostedt982c3502008-11-15 16:31:41 -0500498
Wenji Huang57794a92009-02-06 17:33:27 +0800499 /* if record is not enabled, do nothing */
Steven Rostedt5072c592008-05-12 21:20:43 +0200500 if (!(rec->flags & FTRACE_FL_ENABLED))
501 return 0;
Steven Rostedt982c3502008-11-15 16:31:41 -0500502
Steven Rostedt5072c592008-05-12 21:20:43 +0200503 rec->flags &= ~FTRACE_FL_ENABLED;
504 }
505 }
506
Steven Rostedt982c3502008-11-15 16:31:41 -0500507 if (rec->flags & FTRACE_FL_ENABLED)
Frederic Weisbeckere7d37372008-11-16 06:02:06 +0100508 return ftrace_make_call(rec, ftrace_addr);
Steven Rostedt31e88902008-11-14 16:21:19 -0800509 else
Frederic Weisbeckere7d37372008-11-16 06:02:06 +0100510 return ftrace_make_nop(NULL, rec, ftrace_addr);
Steven Rostedt5072c592008-05-12 21:20:43 +0200511}
512
513static void ftrace_replace_code(int enable)
514{
Steven Rostedt265c8312009-02-13 12:43:56 -0500515 int failed;
Steven Rostedt37ad5082008-05-12 21:20:48 +0200516 struct dyn_ftrace *rec;
517 struct ftrace_page *pg;
518
Steven Rostedt265c8312009-02-13 12:43:56 -0500519 do_for_each_ftrace_rec(pg, rec) {
520 /*
521 * Skip over free records and records that have
522 * failed.
523 */
524 if (rec->flags & FTRACE_FL_FREE ||
525 rec->flags & FTRACE_FL_FAILED)
526 continue;
Steven Rostedt5072c592008-05-12 21:20:43 +0200527
Steven Rostedt265c8312009-02-13 12:43:56 -0500528 /* ignore updates to this record's mcount site */
529 if (get_kprobe((void *)rec->ip)) {
530 freeze_record(rec);
531 continue;
532 } else {
533 unfreeze_record(rec);
Steven Rostedt5072c592008-05-12 21:20:43 +0200534 }
Steven Rostedt265c8312009-02-13 12:43:56 -0500535
536 failed = __ftrace_replace_code(rec, enable);
537 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
538 rec->flags |= FTRACE_FL_FAILED;
539 if ((system_state == SYSTEM_BOOTING) ||
540 !core_kernel_text(rec->ip)) {
541 ftrace_free_rec(rec);
542 } else
543 ftrace_bug(failed, rec->ip);
544 }
545 } while_for_each_ftrace_rec();
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200546}
547
Ingo Molnare309b412008-05-12 21:20:51 +0200548static int
Steven Rostedt31e88902008-11-14 16:21:19 -0800549ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200550{
551 unsigned long ip;
Steven Rostedt593eb8a2008-10-23 09:32:59 -0400552 int ret;
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200553
554 ip = rec->ip;
555
Shaohua Li25aac9d2009-01-09 11:29:40 +0800556 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
Steven Rostedt593eb8a2008-10-23 09:32:59 -0400557 if (ret) {
Steven Rostedt31e88902008-11-14 16:21:19 -0800558 ftrace_bug(ret, ip);
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200559 rec->flags |= FTRACE_FL_FAILED;
Abhishek Sagar492a7ea2008-05-25 00:10:04 +0530560 return 0;
Steven Rostedt37ad5082008-05-12 21:20:48 +0200561 }
Abhishek Sagar492a7ea2008-05-25 00:10:04 +0530562 return 1;
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200563}
564
Ingo Molnare309b412008-05-12 21:20:51 +0200565static int __ftrace_modify_code(void *data)
Steven Rostedt3d083392008-05-12 21:20:42 +0200566{
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200567 int *command = data;
568
Steven Rostedta3583242008-11-11 15:01:42 -0500569 if (*command & FTRACE_ENABLE_CALLS)
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200570 ftrace_replace_code(1);
Steven Rostedta3583242008-11-11 15:01:42 -0500571 else if (*command & FTRACE_DISABLE_CALLS)
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200572 ftrace_replace_code(0);
573
574 if (*command & FTRACE_UPDATE_TRACE_FUNC)
575 ftrace_update_ftrace_func(ftrace_trace_function);
576
Steven Rostedt5a45cfe2008-11-26 00:16:24 -0500577 if (*command & FTRACE_START_FUNC_RET)
578 ftrace_enable_ftrace_graph_caller();
579 else if (*command & FTRACE_STOP_FUNC_RET)
580 ftrace_disable_ftrace_graph_caller();
581
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200582 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +0200583}
584
Ingo Molnare309b412008-05-12 21:20:51 +0200585static void ftrace_run_update_code(int command)
Steven Rostedt3d083392008-05-12 21:20:42 +0200586{
Rusty Russell784e2d72008-07-28 12:16:31 -0500587 stop_machine(__ftrace_modify_code, &command, NULL);
Steven Rostedt3d083392008-05-12 21:20:42 +0200588}
589
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200590static ftrace_func_t saved_ftrace_func;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500591static int ftrace_start_up;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500592
593static void ftrace_startup_enable(int command)
594{
595 if (saved_ftrace_func != ftrace_trace_function) {
596 saved_ftrace_func = ftrace_trace_function;
597 command |= FTRACE_UPDATE_TRACE_FUNC;
598 }
599
600 if (!command || !ftrace_enabled)
601 return;
602
603 ftrace_run_update_code(command);
604}
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200605
Steven Rostedt5a45cfe2008-11-26 00:16:24 -0500606static void ftrace_startup(int command)
Steven Rostedt3d083392008-05-12 21:20:42 +0200607{
Steven Rostedt4eebcc82008-05-12 21:20:48 +0200608 if (unlikely(ftrace_disabled))
609 return;
610
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500611 ftrace_start_up++;
Steven Rostedt982c3502008-11-15 16:31:41 -0500612 command |= FTRACE_ENABLE_CALLS;
Steven Rostedt3d083392008-05-12 21:20:42 +0200613
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500614 ftrace_startup_enable(command);
Steven Rostedt3d083392008-05-12 21:20:42 +0200615}
616
Steven Rostedt5a45cfe2008-11-26 00:16:24 -0500617static void ftrace_shutdown(int command)
Steven Rostedt3d083392008-05-12 21:20:42 +0200618{
Steven Rostedt4eebcc82008-05-12 21:20:48 +0200619 if (unlikely(ftrace_disabled))
620 return;
621
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500622 ftrace_start_up--;
623 if (!ftrace_start_up)
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200624 command |= FTRACE_DISABLE_CALLS;
625
626 if (saved_ftrace_func != ftrace_trace_function) {
627 saved_ftrace_func = ftrace_trace_function;
628 command |= FTRACE_UPDATE_TRACE_FUNC;
629 }
630
631 if (!command || !ftrace_enabled)
Steven Rostedte6ea44e2009-02-14 01:42:44 -0500632 return;
Steven Rostedt3d083392008-05-12 21:20:42 +0200633
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200634 ftrace_run_update_code(command);
Steven Rostedt3d083392008-05-12 21:20:42 +0200635}
636
Ingo Molnare309b412008-05-12 21:20:51 +0200637static void ftrace_startup_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200638{
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200639 int command = FTRACE_ENABLE_MCOUNT;
640
Steven Rostedt4eebcc82008-05-12 21:20:48 +0200641 if (unlikely(ftrace_disabled))
642 return;
643
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200644 /* Force update next time */
645 saved_ftrace_func = NULL;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500646 /* ftrace_start_up is true if we want ftrace running */
647 if (ftrace_start_up)
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200648 command |= FTRACE_ENABLE_CALLS;
649
650 ftrace_run_update_code(command);
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200651}
652
Ingo Molnare309b412008-05-12 21:20:51 +0200653static void ftrace_shutdown_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200654{
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200655 int command = FTRACE_DISABLE_MCOUNT;
656
Steven Rostedt4eebcc82008-05-12 21:20:48 +0200657 if (unlikely(ftrace_disabled))
658 return;
659
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500660 /* ftrace_start_up is true if ftrace is running */
661 if (ftrace_start_up)
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200662 command |= FTRACE_DISABLE_CALLS;
663
664 ftrace_run_update_code(command);
Steven Rostedtb0fc4942008-05-12 21:20:43 +0200665}
666
Steven Rostedt3d083392008-05-12 21:20:42 +0200667static cycle_t ftrace_update_time;
668static unsigned long ftrace_update_cnt;
669unsigned long ftrace_update_tot_cnt;
670
Steven Rostedt31e88902008-11-14 16:21:19 -0800671static int ftrace_update_code(struct module *mod)
Steven Rostedt3d083392008-05-12 21:20:42 +0200672{
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400673 struct dyn_ftrace *p, *t;
Abhishek Sagarf22f9a82008-06-21 23:50:29 +0530674 cycle_t start, stop;
Steven Rostedt3d083392008-05-12 21:20:42 +0200675
Ingo Molnar750ed1a2008-05-12 21:20:46 +0200676 start = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +0200677 ftrace_update_cnt = 0;
678
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400679 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
Abhishek Sagarf22f9a82008-06-21 23:50:29 +0530680
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400681 /* If something went wrong, bail without enabling anything */
682 if (unlikely(ftrace_disabled))
683 return -1;
Steven Rostedt3d083392008-05-12 21:20:42 +0200684
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400685 list_del_init(&p->list);
Abhishek Sagar0eb96702008-06-01 21:47:30 +0530686
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400687 /* convert record (i.e, patch mcount-call with NOP) */
Steven Rostedt31e88902008-11-14 16:21:19 -0800688 if (ftrace_code_disable(mod, p)) {
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400689 p->flags |= FTRACE_FL_CONVERTED;
690 ftrace_update_cnt++;
691 } else
692 ftrace_free_rec(p);
Steven Rostedt3d083392008-05-12 21:20:42 +0200693 }
694
Ingo Molnar750ed1a2008-05-12 21:20:46 +0200695 stop = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +0200696 ftrace_update_time = stop - start;
697 ftrace_update_tot_cnt += ftrace_update_cnt;
698
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200699 return 0;
700}
701
Steven Rostedt68bf21a2008-08-14 15:45:08 -0400702static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200703{
704 struct ftrace_page *pg;
705 int cnt;
706 int i;
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200707
708 /* allocate a few pages */
709 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
710 if (!ftrace_pages_start)
711 return -1;
712
713 /*
714 * Allocate a few more pages.
715 *
716 * TODO: have some parser search vmlinux before
717 * final linking to find all calls to ftrace.
718 * Then we can:
719 * a) know how many pages to allocate.
720 * and/or
721 * b) set up the table then.
722 *
723 * The dynamic code is still necessary for
724 * modules.
725 */
726
727 pg = ftrace_pages = ftrace_pages_start;
728
Steven Rostedt68bf21a2008-08-14 15:45:08 -0400729 cnt = num_to_init / ENTRIES_PER_PAGE;
Steven Rostedt08f5ac902008-10-23 09:33:07 -0400730 pr_info("ftrace: allocating %ld entries in %d pages\n",
walimis5821e1b2008-11-15 15:19:06 +0800731 num_to_init, cnt + 1);
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200732
733 for (i = 0; i < cnt; i++) {
734 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
735
736 /* If we fail, we'll try later anyway */
737 if (!pg->next)
738 break;
739
740 pg = pg->next;
741 }
742
743 return 0;
744}
745
Steven Rostedt5072c592008-05-12 21:20:43 +0200746enum {
747 FTRACE_ITER_FILTER = (1 << 0),
748 FTRACE_ITER_CONT = (1 << 1),
Steven Rostedt41c52c02008-05-22 11:46:33 -0400749 FTRACE_ITER_NOTRACE = (1 << 2),
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +0530750 FTRACE_ITER_FAILURES = (1 << 3),
Steven Rostedt0c75a3e2009-02-16 11:21:52 -0500751 FTRACE_ITER_PRINTALL = (1 << 4),
Steven Rostedt5072c592008-05-12 21:20:43 +0200752};
753
754#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
755
756struct ftrace_iterator {
Steven Rostedt5072c592008-05-12 21:20:43 +0200757 struct ftrace_page *pg;
Steven Rostedt431aa3f2009-01-06 12:43:01 -0500758 int idx;
Steven Rostedt5072c592008-05-12 21:20:43 +0200759 unsigned flags;
760 unsigned char buffer[FTRACE_BUFF_MAX+1];
761 unsigned buffer_idx;
762 unsigned filtered;
763};
764
Ingo Molnare309b412008-05-12 21:20:51 +0200765static void *
Steven Rostedt5072c592008-05-12 21:20:43 +0200766t_next(struct seq_file *m, void *v, loff_t *pos)
767{
768 struct ftrace_iterator *iter = m->private;
769 struct dyn_ftrace *rec = NULL;
770
771 (*pos)++;
772
Steven Rostedt0c75a3e2009-02-16 11:21:52 -0500773 if (iter->flags & FTRACE_ITER_PRINTALL)
774 return NULL;
775
Steven Rostedt52baf112009-02-14 01:15:39 -0500776 mutex_lock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +0200777 retry:
778 if (iter->idx >= iter->pg->index) {
779 if (iter->pg->next) {
780 iter->pg = iter->pg->next;
781 iter->idx = 0;
782 goto retry;
Liming Wang50cdaf02008-11-28 12:13:21 +0800783 } else {
784 iter->idx = -1;
Steven Rostedt5072c592008-05-12 21:20:43 +0200785 }
786 } else {
787 rec = &iter->pg->records[iter->idx++];
Steven Rostedta9fdda32008-08-14 22:47:17 -0400788 if ((rec->flags & FTRACE_FL_FREE) ||
789
790 (!(iter->flags & FTRACE_ITER_FAILURES) &&
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +0530791 (rec->flags & FTRACE_FL_FAILED)) ||
792
793 ((iter->flags & FTRACE_ITER_FAILURES) &&
Steven Rostedta9fdda32008-08-14 22:47:17 -0400794 !(rec->flags & FTRACE_FL_FAILED)) ||
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +0530795
Steven Rostedt0183fb12008-11-07 22:36:02 -0500796 ((iter->flags & FTRACE_ITER_FILTER) &&
797 !(rec->flags & FTRACE_FL_FILTER)) ||
798
Steven Rostedt41c52c02008-05-22 11:46:33 -0400799 ((iter->flags & FTRACE_ITER_NOTRACE) &&
800 !(rec->flags & FTRACE_FL_NOTRACE))) {
Steven Rostedt5072c592008-05-12 21:20:43 +0200801 rec = NULL;
802 goto retry;
803 }
804 }
Steven Rostedt52baf112009-02-14 01:15:39 -0500805 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +0200806
Steven Rostedt5072c592008-05-12 21:20:43 +0200807 return rec;
808}
809
810static void *t_start(struct seq_file *m, loff_t *pos)
811{
812 struct ftrace_iterator *iter = m->private;
813 void *p = NULL;
Steven Rostedt5072c592008-05-12 21:20:43 +0200814
Steven Rostedt0c75a3e2009-02-16 11:21:52 -0500815 /*
816 * For set_ftrace_filter reading, if we have the filter
817 * off, we can short cut and just print out that all
818 * functions are enabled.
819 */
820 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
821 if (*pos > 0)
822 return NULL;
823 iter->flags |= FTRACE_ITER_PRINTALL;
824 (*pos)++;
825 return iter;
826 }
827
Liming Wang50cdaf02008-11-28 12:13:21 +0800828 if (*pos > 0) {
829 if (iter->idx < 0)
830 return p;
831 (*pos)--;
832 iter->idx--;
833 }
walimis5821e1b2008-11-15 15:19:06 +0800834
Liming Wang50cdaf02008-11-28 12:13:21 +0800835 p = t_next(m, p, pos);
Steven Rostedt5072c592008-05-12 21:20:43 +0200836
837 return p;
838}
839
840static void t_stop(struct seq_file *m, void *p)
841{
842}
843
844static int t_show(struct seq_file *m, void *v)
845{
Steven Rostedt0c75a3e2009-02-16 11:21:52 -0500846 struct ftrace_iterator *iter = m->private;
Steven Rostedt5072c592008-05-12 21:20:43 +0200847 struct dyn_ftrace *rec = v;
848 char str[KSYM_SYMBOL_LEN];
849
Steven Rostedt0c75a3e2009-02-16 11:21:52 -0500850 if (iter->flags & FTRACE_ITER_PRINTALL) {
851 seq_printf(m, "#### all functions enabled ####\n");
852 return 0;
853 }
854
Steven Rostedt5072c592008-05-12 21:20:43 +0200855 if (!rec)
856 return 0;
857
858 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
859
Liming Wang50cdaf02008-11-28 12:13:21 +0800860 seq_printf(m, "%s\n", str);
Steven Rostedt5072c592008-05-12 21:20:43 +0200861
862 return 0;
863}
864
865static struct seq_operations show_ftrace_seq_ops = {
866 .start = t_start,
867 .next = t_next,
868 .stop = t_stop,
869 .show = t_show,
870};
871
Ingo Molnare309b412008-05-12 21:20:51 +0200872static int
Steven Rostedt5072c592008-05-12 21:20:43 +0200873ftrace_avail_open(struct inode *inode, struct file *file)
874{
875 struct ftrace_iterator *iter;
876 int ret;
877
Steven Rostedt4eebcc82008-05-12 21:20:48 +0200878 if (unlikely(ftrace_disabled))
879 return -ENODEV;
880
Steven Rostedt5072c592008-05-12 21:20:43 +0200881 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
882 if (!iter)
883 return -ENOMEM;
884
885 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +0200886
887 ret = seq_open(file, &show_ftrace_seq_ops);
888 if (!ret) {
889 struct seq_file *m = file->private_data;
Ingo Molnar4bf39a92008-05-12 21:20:46 +0200890
Steven Rostedt5072c592008-05-12 21:20:43 +0200891 m->private = iter;
Ingo Molnar4bf39a92008-05-12 21:20:46 +0200892 } else {
Steven Rostedt5072c592008-05-12 21:20:43 +0200893 kfree(iter);
Ingo Molnar4bf39a92008-05-12 21:20:46 +0200894 }
Steven Rostedt5072c592008-05-12 21:20:43 +0200895
896 return ret;
897}
898
899int ftrace_avail_release(struct inode *inode, struct file *file)
900{
901 struct seq_file *m = (struct seq_file *)file->private_data;
902 struct ftrace_iterator *iter = m->private;
903
904 seq_release(inode, file);
905 kfree(iter);
Ingo Molnar4bf39a92008-05-12 21:20:46 +0200906
Steven Rostedt5072c592008-05-12 21:20:43 +0200907 return 0;
908}
909
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +0530910static int
911ftrace_failures_open(struct inode *inode, struct file *file)
912{
913 int ret;
914 struct seq_file *m;
915 struct ftrace_iterator *iter;
916
917 ret = ftrace_avail_open(inode, file);
918 if (!ret) {
919 m = (struct seq_file *)file->private_data;
920 iter = (struct ftrace_iterator *)m->private;
921 iter->flags = FTRACE_ITER_FAILURES;
922 }
923
924 return ret;
925}
926
927
Steven Rostedt41c52c02008-05-22 11:46:33 -0400928static void ftrace_filter_reset(int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +0200929{
930 struct ftrace_page *pg;
931 struct dyn_ftrace *rec;
Steven Rostedt41c52c02008-05-22 11:46:33 -0400932 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
Steven Rostedt5072c592008-05-12 21:20:43 +0200933
Steven Rostedt52baf112009-02-14 01:15:39 -0500934 mutex_lock(&ftrace_lock);
Steven Rostedt41c52c02008-05-22 11:46:33 -0400935 if (enable)
936 ftrace_filtered = 0;
Steven Rostedt265c8312009-02-13 12:43:56 -0500937 do_for_each_ftrace_rec(pg, rec) {
938 if (rec->flags & FTRACE_FL_FAILED)
939 continue;
940 rec->flags &= ~type;
941 } while_for_each_ftrace_rec();
Steven Rostedt52baf112009-02-14 01:15:39 -0500942 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +0200943}
944
Ingo Molnare309b412008-05-12 21:20:51 +0200945static int
Steven Rostedt41c52c02008-05-22 11:46:33 -0400946ftrace_regex_open(struct inode *inode, struct file *file, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +0200947{
948 struct ftrace_iterator *iter;
949 int ret = 0;
950
Steven Rostedt4eebcc82008-05-12 21:20:48 +0200951 if (unlikely(ftrace_disabled))
952 return -ENODEV;
953
Steven Rostedt5072c592008-05-12 21:20:43 +0200954 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
955 if (!iter)
956 return -ENOMEM;
957
Steven Rostedt41c52c02008-05-22 11:46:33 -0400958 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +0200959 if ((file->f_mode & FMODE_WRITE) &&
960 !(file->f_flags & O_APPEND))
Steven Rostedt41c52c02008-05-22 11:46:33 -0400961 ftrace_filter_reset(enable);
Steven Rostedt5072c592008-05-12 21:20:43 +0200962
963 if (file->f_mode & FMODE_READ) {
964 iter->pg = ftrace_pages_start;
Steven Rostedt41c52c02008-05-22 11:46:33 -0400965 iter->flags = enable ? FTRACE_ITER_FILTER :
966 FTRACE_ITER_NOTRACE;
Steven Rostedt5072c592008-05-12 21:20:43 +0200967
968 ret = seq_open(file, &show_ftrace_seq_ops);
969 if (!ret) {
970 struct seq_file *m = file->private_data;
971 m->private = iter;
972 } else
973 kfree(iter);
974 } else
975 file->private_data = iter;
Steven Rostedt41c52c02008-05-22 11:46:33 -0400976 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +0200977
978 return ret;
979}
980
Steven Rostedt41c52c02008-05-22 11:46:33 -0400981static int
982ftrace_filter_open(struct inode *inode, struct file *file)
983{
984 return ftrace_regex_open(inode, file, 1);
985}
986
987static int
988ftrace_notrace_open(struct inode *inode, struct file *file)
989{
990 return ftrace_regex_open(inode, file, 0);
991}
992
Ingo Molnare309b412008-05-12 21:20:51 +0200993static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -0400994ftrace_regex_read(struct file *file, char __user *ubuf,
Steven Rostedt5072c592008-05-12 21:20:43 +0200995 size_t cnt, loff_t *ppos)
996{
997 if (file->f_mode & FMODE_READ)
998 return seq_read(file, ubuf, cnt, ppos);
999 else
1000 return -EPERM;
1001}
1002
Ingo Molnare309b412008-05-12 21:20:51 +02001003static loff_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04001004ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
Steven Rostedt5072c592008-05-12 21:20:43 +02001005{
1006 loff_t ret;
1007
1008 if (file->f_mode & FMODE_READ)
1009 ret = seq_lseek(file, offset, origin);
1010 else
1011 file->f_pos = ret = 1;
1012
1013 return ret;
1014}
1015
1016enum {
1017 MATCH_FULL,
1018 MATCH_FRONT_ONLY,
1019 MATCH_MIDDLE_ONLY,
1020 MATCH_END_ONLY,
1021};
1022
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001023/*
1024 * (static function - no need for kernel doc)
1025 *
1026 * Pass in a buffer containing a glob and this function will
1027 * set search to point to the search part of the buffer and
1028 * return the type of search it is (see enum above).
1029 * This does modify buff.
1030 *
1031 * Returns enum type.
1032 * search returns the pointer to use for comparison.
1033 * not returns 1 if buff started with a '!'
1034 * 0 otherwise.
1035 */
1036static int
Steven Rostedt64e7c442009-02-13 17:08:48 -05001037ftrace_setup_glob(char *buff, int len, char **search, int *not)
Steven Rostedt5072c592008-05-12 21:20:43 +02001038{
Steven Rostedt5072c592008-05-12 21:20:43 +02001039 int type = MATCH_FULL;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001040 int i;
Steven Rostedtea3a6d62008-12-17 15:05:36 -05001041
1042 if (buff[0] == '!') {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001043 *not = 1;
Steven Rostedtea3a6d62008-12-17 15:05:36 -05001044 buff++;
1045 len--;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001046 } else
1047 *not = 0;
1048
1049 *search = buff;
Steven Rostedt5072c592008-05-12 21:20:43 +02001050
1051 for (i = 0; i < len; i++) {
1052 if (buff[i] == '*') {
1053 if (!i) {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001054 *search = buff + 1;
Steven Rostedt5072c592008-05-12 21:20:43 +02001055 type = MATCH_END_ONLY;
Steven Rostedt5072c592008-05-12 21:20:43 +02001056 } else {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001057 if (type == MATCH_END_ONLY)
Steven Rostedt5072c592008-05-12 21:20:43 +02001058 type = MATCH_MIDDLE_ONLY;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001059 else
Steven Rostedt5072c592008-05-12 21:20:43 +02001060 type = MATCH_FRONT_ONLY;
Steven Rostedt5072c592008-05-12 21:20:43 +02001061 buff[i] = 0;
1062 break;
1063 }
1064 }
1065 }
1066
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001067 return type;
1068}
1069
Steven Rostedt64e7c442009-02-13 17:08:48 -05001070static int ftrace_match(char *str, char *regex, int len, int type)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001071{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001072 int matched = 0;
1073 char *ptr;
1074
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001075 switch (type) {
1076 case MATCH_FULL:
1077 if (strcmp(str, regex) == 0)
1078 matched = 1;
1079 break;
1080 case MATCH_FRONT_ONLY:
1081 if (strncmp(str, regex, len) == 0)
1082 matched = 1;
1083 break;
1084 case MATCH_MIDDLE_ONLY:
1085 if (strstr(str, regex))
1086 matched = 1;
1087 break;
1088 case MATCH_END_ONLY:
1089 ptr = strstr(str, regex);
1090 if (ptr && (ptr[len] == 0))
1091 matched = 1;
1092 break;
1093 }
1094
1095 return matched;
1096}
1097
Steven Rostedt64e7c442009-02-13 17:08:48 -05001098static int
1099ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1100{
1101 char str[KSYM_SYMBOL_LEN];
1102
1103 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1104 return ftrace_match(str, regex, len, type);
1105}
1106
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001107static void ftrace_match_records(char *buff, int len, int enable)
1108{
1109 char *search;
1110 struct ftrace_page *pg;
1111 struct dyn_ftrace *rec;
1112 int type;
1113 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1114 unsigned search_len;
1115 int not;
1116
1117 type = ftrace_setup_glob(buff, len, &search, &not);
1118
1119 search_len = strlen(search);
1120
Steven Rostedt52baf112009-02-14 01:15:39 -05001121 mutex_lock(&ftrace_lock);
Steven Rostedt265c8312009-02-13 12:43:56 -05001122 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt5072c592008-05-12 21:20:43 +02001123
Steven Rostedt265c8312009-02-13 12:43:56 -05001124 if (rec->flags & FTRACE_FL_FAILED)
1125 continue;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001126
1127 if (ftrace_match_record(rec, search, search_len, type)) {
Steven Rostedt265c8312009-02-13 12:43:56 -05001128 if (not)
1129 rec->flags &= ~flag;
1130 else
1131 rec->flags |= flag;
1132 }
Steven Rostedte68746a2009-02-13 20:53:42 -05001133 /*
1134 * Only enable filtering if we have a function that
1135 * is filtered on.
1136 */
1137 if (enable && (rec->flags & FTRACE_FL_FILTER))
1138 ftrace_filtered = 1;
Steven Rostedt265c8312009-02-13 12:43:56 -05001139 } while_for_each_ftrace_rec();
Steven Rostedt52baf112009-02-14 01:15:39 -05001140 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001141}
1142
Steven Rostedt64e7c442009-02-13 17:08:48 -05001143static int
1144ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1145 char *regex, int len, int type)
1146{
1147 char str[KSYM_SYMBOL_LEN];
1148 char *modname;
1149
1150 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1151
1152 if (!modname || strcmp(modname, mod))
1153 return 0;
1154
1155 /* blank search means to match all funcs in the mod */
1156 if (len)
1157 return ftrace_match(str, regex, len, type);
1158 else
1159 return 1;
1160}
1161
1162static void ftrace_match_module_records(char *buff, char *mod, int enable)
1163{
1164 char *search = buff;
1165 struct ftrace_page *pg;
1166 struct dyn_ftrace *rec;
1167 int type = MATCH_FULL;
1168 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1169 unsigned search_len = 0;
1170 int not = 0;
1171
1172 /* blank or '*' mean the same */
1173 if (strcmp(buff, "*") == 0)
1174 buff[0] = 0;
1175
1176 /* handle the case of 'dont filter this module' */
1177 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1178 buff[0] = 0;
1179 not = 1;
1180 }
1181
1182 if (strlen(buff)) {
1183 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1184 search_len = strlen(search);
1185 }
1186
Steven Rostedt52baf112009-02-14 01:15:39 -05001187 mutex_lock(&ftrace_lock);
Steven Rostedt64e7c442009-02-13 17:08:48 -05001188 do_for_each_ftrace_rec(pg, rec) {
1189
1190 if (rec->flags & FTRACE_FL_FAILED)
1191 continue;
1192
1193 if (ftrace_match_module_record(rec, mod,
1194 search, search_len, type)) {
1195 if (not)
1196 rec->flags &= ~flag;
1197 else
1198 rec->flags |= flag;
1199 }
Steven Rostedte68746a2009-02-13 20:53:42 -05001200 if (enable && (rec->flags & FTRACE_FL_FILTER))
1201 ftrace_filtered = 1;
Steven Rostedt64e7c442009-02-13 17:08:48 -05001202
1203 } while_for_each_ftrace_rec();
Steven Rostedt52baf112009-02-14 01:15:39 -05001204 mutex_unlock(&ftrace_lock);
Steven Rostedt64e7c442009-02-13 17:08:48 -05001205}
1206
Steven Rostedtf6180772009-02-14 00:40:25 -05001207/*
1208 * We register the module command as a template to show others how
1209 * to register the a command as well.
1210 */
1211
1212static int
1213ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1214{
1215 char *mod;
1216
1217 /*
1218 * cmd == 'mod' because we only registered this func
1219 * for the 'mod' ftrace_func_command.
1220 * But if you register one func with multiple commands,
1221 * you can tell which command was used by the cmd
1222 * parameter.
1223 */
1224
1225 /* we must have a module name */
1226 if (!param)
1227 return -EINVAL;
1228
1229 mod = strsep(&param, ":");
1230 if (!strlen(mod))
1231 return -EINVAL;
1232
1233 ftrace_match_module_records(func, mod, enable);
1234 return 0;
1235}
1236
1237static struct ftrace_func_command ftrace_mod_cmd = {
1238 .name = "mod",
1239 .func = ftrace_mod_callback,
1240};
1241
1242static int __init ftrace_mod_cmd_init(void)
1243{
1244 return register_ftrace_command(&ftrace_mod_cmd);
1245}
1246device_initcall(ftrace_mod_cmd_init);
1247
1248static LIST_HEAD(ftrace_commands);
1249static DEFINE_MUTEX(ftrace_cmd_mutex);
1250
1251int register_ftrace_command(struct ftrace_func_command *cmd)
1252{
1253 struct ftrace_func_command *p;
1254 int ret = 0;
1255
1256 mutex_lock(&ftrace_cmd_mutex);
1257 list_for_each_entry(p, &ftrace_commands, list) {
1258 if (strcmp(cmd->name, p->name) == 0) {
1259 ret = -EBUSY;
1260 goto out_unlock;
1261 }
1262 }
1263 list_add(&cmd->list, &ftrace_commands);
1264 out_unlock:
1265 mutex_unlock(&ftrace_cmd_mutex);
1266
1267 return ret;
1268}
1269
1270int unregister_ftrace_command(struct ftrace_func_command *cmd)
1271{
1272 struct ftrace_func_command *p, *n;
1273 int ret = -ENODEV;
1274
1275 mutex_lock(&ftrace_cmd_mutex);
1276 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1277 if (strcmp(cmd->name, p->name) == 0) {
1278 ret = 0;
1279 list_del_init(&p->list);
1280 goto out_unlock;
1281 }
1282 }
1283 out_unlock:
1284 mutex_unlock(&ftrace_cmd_mutex);
1285
1286 return ret;
1287}
1288
Steven Rostedt64e7c442009-02-13 17:08:48 -05001289static int ftrace_process_regex(char *buff, int len, int enable)
1290{
Steven Rostedtf6180772009-02-14 00:40:25 -05001291 struct ftrace_func_command *p;
1292 char *func, *command, *next = buff;
1293 int ret = -EINVAL;
Steven Rostedt64e7c442009-02-13 17:08:48 -05001294
1295 func = strsep(&next, ":");
1296
1297 if (!next) {
1298 ftrace_match_records(func, len, enable);
1299 return 0;
1300 }
1301
Steven Rostedtf6180772009-02-14 00:40:25 -05001302 /* command found */
Steven Rostedt64e7c442009-02-13 17:08:48 -05001303
1304 command = strsep(&next, ":");
1305
Steven Rostedtf6180772009-02-14 00:40:25 -05001306 mutex_lock(&ftrace_cmd_mutex);
1307 list_for_each_entry(p, &ftrace_commands, list) {
1308 if (strcmp(p->name, command) == 0) {
1309 ret = p->func(func, command, next, enable);
1310 goto out_unlock;
1311 }
Steven Rostedt64e7c442009-02-13 17:08:48 -05001312 }
Steven Rostedtf6180772009-02-14 00:40:25 -05001313 out_unlock:
1314 mutex_unlock(&ftrace_cmd_mutex);
Steven Rostedt64e7c442009-02-13 17:08:48 -05001315
Steven Rostedtf6180772009-02-14 00:40:25 -05001316 return ret;
Steven Rostedt64e7c442009-02-13 17:08:48 -05001317}
1318
Ingo Molnare309b412008-05-12 21:20:51 +02001319static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04001320ftrace_regex_write(struct file *file, const char __user *ubuf,
1321 size_t cnt, loff_t *ppos, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02001322{
1323 struct ftrace_iterator *iter;
1324 char ch;
1325 size_t read = 0;
1326 ssize_t ret;
1327
1328 if (!cnt || cnt < 0)
1329 return 0;
1330
Steven Rostedt41c52c02008-05-22 11:46:33 -04001331 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001332
1333 if (file->f_mode & FMODE_READ) {
1334 struct seq_file *m = file->private_data;
1335 iter = m->private;
1336 } else
1337 iter = file->private_data;
1338
1339 if (!*ppos) {
1340 iter->flags &= ~FTRACE_ITER_CONT;
1341 iter->buffer_idx = 0;
1342 }
1343
1344 ret = get_user(ch, ubuf++);
1345 if (ret)
1346 goto out;
1347 read++;
1348 cnt--;
1349
1350 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1351 /* skip white space */
1352 while (cnt && isspace(ch)) {
1353 ret = get_user(ch, ubuf++);
1354 if (ret)
1355 goto out;
1356 read++;
1357 cnt--;
1358 }
1359
Steven Rostedt5072c592008-05-12 21:20:43 +02001360 if (isspace(ch)) {
1361 file->f_pos += read;
1362 ret = read;
1363 goto out;
1364 }
1365
1366 iter->buffer_idx = 0;
1367 }
1368
1369 while (cnt && !isspace(ch)) {
1370 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1371 iter->buffer[iter->buffer_idx++] = ch;
1372 else {
1373 ret = -EINVAL;
1374 goto out;
1375 }
1376 ret = get_user(ch, ubuf++);
1377 if (ret)
1378 goto out;
1379 read++;
1380 cnt--;
1381 }
1382
1383 if (isspace(ch)) {
1384 iter->filtered++;
1385 iter->buffer[iter->buffer_idx] = 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05001386 ret = ftrace_process_regex(iter->buffer,
1387 iter->buffer_idx, enable);
1388 if (ret)
1389 goto out;
Steven Rostedt5072c592008-05-12 21:20:43 +02001390 iter->buffer_idx = 0;
1391 } else
1392 iter->flags |= FTRACE_ITER_CONT;
1393
1394
1395 file->f_pos += read;
1396
1397 ret = read;
1398 out:
Steven Rostedt41c52c02008-05-22 11:46:33 -04001399 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001400
1401 return ret;
1402}
1403
Steven Rostedt41c52c02008-05-22 11:46:33 -04001404static ssize_t
1405ftrace_filter_write(struct file *file, const char __user *ubuf,
1406 size_t cnt, loff_t *ppos)
1407{
1408 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1409}
1410
1411static ssize_t
1412ftrace_notrace_write(struct file *file, const char __user *ubuf,
1413 size_t cnt, loff_t *ppos)
1414{
1415 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1416}
1417
1418static void
1419ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1420{
1421 if (unlikely(ftrace_disabled))
1422 return;
1423
1424 mutex_lock(&ftrace_regex_lock);
1425 if (reset)
1426 ftrace_filter_reset(enable);
1427 if (buf)
Steven Rostedt7f24b312009-02-13 14:37:33 -05001428 ftrace_match_records(buf, len, enable);
Steven Rostedt41c52c02008-05-22 11:46:33 -04001429 mutex_unlock(&ftrace_regex_lock);
1430}
1431
Steven Rostedt77a2b372008-05-12 21:20:45 +02001432/**
1433 * ftrace_set_filter - set a function to filter on in ftrace
1434 * @buf - the string that holds the function filter text.
1435 * @len - the length of the string.
1436 * @reset - non zero to reset all filters before applying this filter.
1437 *
1438 * Filters denote which functions should be enabled when tracing is enabled.
1439 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1440 */
Ingo Molnare309b412008-05-12 21:20:51 +02001441void ftrace_set_filter(unsigned char *buf, int len, int reset)
Steven Rostedt77a2b372008-05-12 21:20:45 +02001442{
Steven Rostedt41c52c02008-05-22 11:46:33 -04001443 ftrace_set_regex(buf, len, reset, 1);
1444}
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001445
Steven Rostedt41c52c02008-05-22 11:46:33 -04001446/**
1447 * ftrace_set_notrace - set a function to not trace in ftrace
1448 * @buf - the string that holds the function notrace text.
1449 * @len - the length of the string.
1450 * @reset - non zero to reset all filters before applying this filter.
1451 *
1452 * Notrace Filters denote which functions should not be enabled when tracing
1453 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1454 * for tracing.
1455 */
1456void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1457{
1458 ftrace_set_regex(buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02001459}
1460
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
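/*
 * The file_operations above back the debugfs control files created in
 * ftrace_init_dyn_debugfs() below.  As a rough illustration only (assuming
 * debugfs is mounted at /sys/kernel/debug and the tracing directory lives
 * there), the filters can be driven from user space with plain writes:
 *
 *	echo 'sys_nanosleep' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'printk'        > /sys/kernel/debug/tracing/set_ftrace_notrace
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *
 * The exact mount point and the supported match syntax depend on the
 * kernel configuration; the paths above are only an example.
 */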
1532
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05001533#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1534
1535static DEFINE_MUTEX(graph_lock);
1536
1537int ftrace_graph_count;
1538unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1539
1540static void *
1541g_next(struct seq_file *m, void *v, loff_t *pos)
1542{
1543 unsigned long *array = m->private;
1544 int index = *pos;
1545
1546 (*pos)++;
1547
1548 if (index >= ftrace_graph_count)
1549 return NULL;
1550
1551 return &array[index];
1552}
1553
1554static void *g_start(struct seq_file *m, loff_t *pos)
1555{
1556 void *p = NULL;
1557
1558 mutex_lock(&graph_lock);
1559
1560 p = g_next(m, p, pos);
1561
1562 return p;
1563}
1564
1565static void g_stop(struct seq_file *m, void *p)
1566{
1567 mutex_unlock(&graph_lock);
1568}
1569
1570static int g_show(struct seq_file *m, void *v)
1571{
1572 unsigned long *ptr = v;
1573 char str[KSYM_SYMBOL_LEN];
1574
1575 if (!ptr)
1576 return 0;
1577
1578 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1579
1580 seq_printf(m, "%s\n", str);
1581
1582 return 0;
1583}
1584
1585static struct seq_operations ftrace_graph_seq_ops = {
1586 .start = g_start,
1587 .next = g_next,
1588 .stop = g_stop,
1589 .show = g_show,
1590};
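/*
 * Note on the iterator above: g_start() takes graph_lock and g_stop()
 * drops it, so ftrace_graph_funcs cannot change while the seq_file core
 * is formatting a batch of entries for a read of set_graph_function;
 * g_next() simply walks the array up to ftrace_graph_count entries and
 * g_show() resolves each saved ip back to a symbol name via kallsyms.
 */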

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int j;

	if (ftrace_disabled)
		return -ENODEV;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
		if (strcmp(str, buffer) == 0) {
			/* Return 1 if we add it to the array */
			found = 1;
			for (j = 0; j < idx; j++)
				if (array[j] == rec->ip) {
					found = 0;
					break;
				}
			if (found)
				array[idx] = rec->ip;
			goto out;
		}
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}
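/*
 * ftrace_graph_write() above accepts a single function name per write(2)
 * and resolves it against the ftrace records via ftrace_set_func().  For
 * illustration only (the paths assume the usual debugfs mount point):
 *
 *	echo schedule  > /sys/kernel/debug/tracing/set_graph_function
 *	echo do_IRQ   >> /sys/kernel/debug/tracing/set_graph_function
 *
 * Each write adds one entry, up to FTRACE_GRAPH_MAX_FUNCS; opening the
 * file for writing without O_APPEND clears the list first.
 */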

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
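/*
 * ftrace_init_module() is expected to be called from the module loader
 * with the boundaries of a module's __mcount_loc section, analogous to
 * the boot-time ftrace_init() below which scans the kernel's own
 * __start_mcount_loc..__stop_mcount_loc range.  (The exact call site in
 * the module loader is not shown in this file.)
 */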

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
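/*
 * ftrace_pid_write() above gives set_ftrace_pid three input forms, shown
 * here for illustration only (relative to the debugfs tracing directory):
 *
 *	echo 1234 > set_ftrace_pid	trace only PID 1234
 *	echo 0    > set_ftrace_pid	trace the per-cpu idle (swapper) tasks
 *	echo -1   > set_ftrace_pid	disable PID filtering again
 */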

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it marks ftrace as permanently disabled
 * and points the trace callbacks back at the stub, without any of
 * the usual synchronization or cleanup.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}
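/*
 * Minimal usage sketch for the API above (illustrative only, not compiled
 * here).  It assumes the two-argument callback prototype declared for
 * ftrace_func_t in linux/ftrace.h; the callback must itself be notrace:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// count or log the call; must not recurse into ftrace
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);	// see below
 */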

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
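/*
 * ftrace_enable_sysctl() above is wired to the kernel.ftrace_enabled
 * sysctl (typically reachable as /proc/sys/kernel/ftrace_enabled).
 * Writing 0 swaps the trace callback for ftrace_stub; writing 1 restores
 * either the single registered callback or the list-walking
 * ftrace_list_func, as shown in the code above.
 */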

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
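/*
 * Usage sketch for the graph API above (illustrative only).  The entry
 * and return callback prototypes are the trace_func_graph_ent_t and
 * trace_func_graph_ret_t typedefs used in this file; returning 0 from
 * the entry handler tells ftrace not to hook that function's return:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// also trace this function's return
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		// called when the traced function returns
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */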

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif