/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
15
Steven Rostedt3d083392008-05-12 21:20:42 +020016#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020019#include <linux/seq_file.h>
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -080020#include <linux/suspend.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020021#include <linux/debugfs.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020022#include <linux/hardirq.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010023#include <linux/kthread.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020024#include <linux/uaccess.h>
Steven Rostedt5855fea2011-12-16 19:27:42 -050025#include <linux/bsearch.h>
Paul Gortmaker56d82e02011-05-26 17:53:52 -040026#include <linux/module.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010027#include <linux/ftrace.h>
Steven Rostedtb0fc4942008-05-12 21:20:43 +020028#include <linux/sysctl.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090029#include <linux/slab.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020030#include <linux/ctype.h>
Steven Rostedt68950612011-12-16 17:06:45 -050031#include <linux/sort.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020032#include <linux/list.h>
Steven Rostedt59df055f2009-02-14 15:29:06 -050033#include <linux/hash.h>
Paul E. McKenney3f379b02010-03-05 15:03:25 -080034#include <linux/rcupdate.h>
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020035
Steven Rostedtad8d75f2009-04-14 19:39:12 -040036#include <trace/events/sched.h>
Steven Rostedt8aef2d22009-03-24 01:10:15 -040037
Steven Rostedt2af15d62009-05-28 13:37:24 -040038#include <asm/setup.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053039
Steven Rostedt0706f1c2009-03-23 23:12:58 -040040#include "trace_output.h"
Steven Rostedtbac429f2009-03-20 12:50:56 -040041#include "trace_stat.h"
Steven Rostedt3d083392008-05-12 21:20:42 +020042
/*
 * FTRACE_WARN_ON()/FTRACE_WARN_ON_ONCE() evaluate their condition exactly
 * once; on a warning they shut the whole tracer down via ftrace_kill(),
 * since a tripped assertion here means the text-modification machinery can
 * no longer be trusted.  Like WARN_ON(), they return the condition's value.
 */
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
/* default/maximum sizes for the filter hashes (struct ftrace_hash) */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

/* An ops must not be both "global" and "control" at the same time */
#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;	/* previous value, for sysctl transitions */

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;	/* linkage on ftrace_pids */
	struct pid *pid;	/* task(s) selected for tracing */
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* protects registration/unregistration and list updates */
static DEFINE_MUTEX(ftrace_lock);

/* sentinel terminating every ops list; its func is a no-op stub */
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func = ftrace_stub,
};

/* the three ops lists: "global" filtered ops, per-cpu "control" ops, all ops */
static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
/* what the mcount trampoline actually calls */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* staged value of __ftrace_trace_function while code patching is in flight */
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;	/* proxy ops for ftrace_global_list */
static struct ftrace_ops control_ops;	/* proxy ops for ftrace_control_list */

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
105
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800106/*
Steven Rostedtb8489142011-05-04 09:27:52 -0400107 * Traverse the ftrace_global_list, invoking all entries. The reason that we
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800108 * can use rcu_dereference_raw() is that elements removed from this list
109 * are simply leaked, so there is no need to interact with a grace-period
110 * mechanism. The rcu_dereference_raw() calls are needed to handle
Steven Rostedtb8489142011-05-04 09:27:52 -0400111 * concurrent insertions into the ftrace_global_list.
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800112 *
113 * Silly Alpha and silly pointer-speculation compiler optimizations!
114 */
Steven Rostedtb8489142011-05-04 09:27:52 -0400115static void ftrace_global_list_func(unsigned long ip,
116 unsigned long parent_ip)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200117{
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400118 struct ftrace_ops *op;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200119
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400120 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
121 return;
122
123 trace_recursion_set(TRACE_GLOBAL_BIT);
124 op = rcu_dereference_raw(ftrace_global_list); /*see above*/
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200125 while (op != &ftrace_list_end) {
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200126 op->func(ip, parent_ip);
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800127 op = rcu_dereference_raw(op->next); /*see above*/
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200128 };
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400129 trace_recursion_clear(TRACE_GLOBAL_BIT);
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200130}
131
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500132static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
133{
Steven Rostedt0ef8cde2008-12-03 15:36:58 -0500134 if (!test_tsk_trace_trace(current))
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500135 return;
136
137 ftrace_pid_function(ip, parent_ip);
138}
139
140static void set_ftrace_pid_function(ftrace_func_t func)
141{
142 /* do not set ftrace_pid_function to itself! */
143 if (func != ftrace_pid_func)
144 ftrace_pid_function = func;
145}
146
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag
 */
void clear_ftrace_function(void)
{
	/* point every trampoline entry at the do-nothing stub */
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function_delay = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200160
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	/* honor the quick-disable switch before calling the real tracer */
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
174
/* Mark a control ops as disabled on every possible CPU. */
static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

/*
 * Allocate the per-cpu "disabled" state for a control ops and start it
 * out disabled everywhere.  Returns 0 on success or -ENOMEM.
 */
static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

/* Release the per-cpu state set up by control_ops_alloc(). */
static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}
200
/* Recompute the callback used by the global_ops proxy. */
static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end)
		func = ftrace_global_list->func;
	else
		func = ftrace_global_list_func;

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}
224
/*
 * Select the function the mcount trampoline will call, based on how
 * many ops are registered and on arch/dynamic-ftrace constraints.
 */
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * not dynamic, then have the mcount trampoline call
	 * the function directly
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
		func = ftrace_ops_list->func;
	else
		func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
#ifdef CONFIG_DYNAMIC_FTRACE
	/* do not update till all functions have been modified */
	__ftrace_trace_function_delay = func;
#else
	__ftrace_trace_function = func;
#endif
	/* arch cannot test function_trace_stop itself; do it from C */
	ftrace_trace_function = ftrace_test_stop_func;
#endif
}
255
/* Push @ops onto the front of @list, published safely for lockless readers. */
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
Steven Rostedt3d083392008-05-12 21:20:42 +0200267
/*
 * Unlink @ops from @list.  Returns 0 on success, -1 when @ops is not on
 * the list.  The entry itself is not freed here; callers handle any
 * required grace period.
 */
static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	/* walk the links until the slot pointing at ops is found */
	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}
291
Jiri Olsae2484912012-02-15 15:51:48 +0100292static void add_ftrace_list_ops(struct ftrace_ops **list,
293 struct ftrace_ops *main_ops,
294 struct ftrace_ops *ops)
295{
296 int first = *list == &ftrace_list_end;
297 add_ftrace_ops(list, ops);
298 if (first)
299 add_ftrace_ops(&ftrace_ops_list, main_ops);
300}
301
302static int remove_ftrace_list_ops(struct ftrace_ops **list,
303 struct ftrace_ops *main_ops,
304 struct ftrace_ops *ops)
305{
306 int ret = remove_ftrace_ops(list, ops);
307 if (!ret && *list == &ftrace_list_end)
308 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
309 return ret;
310}
311
/*
 * Hook @ops into the appropriate ops list (global, control or plain).
 * Caller must hold ftrace_lock.  Returns 0 on success or a -errno.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ftrace_disabled)
		return -ENODEV;

	/* the internal proxy ops must never be registered directly */
	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	/* We don't support both control and global flags set. */
	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
		return -EINVAL;

	/* ops outside core kernel data (e.g. in a module) may be freed */
	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
345
/*
 * Remove @ops from whichever ops list it was registered on.
 * Caller must hold ftrace_lock.  Returns 0 on success or a -errno.
 */
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (ftrace_disabled)
		return -ENODEV;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	/* the internal proxy ops must never be unregistered directly */
	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_list_ops(&ftrace_global_list,
					     &global_ops, ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
		if (!ret) {
			/*
			 * The ftrace_ops is now removed from the list,
			 * so there'll be no new users. We must ensure
			 * all current users are done before we free
			 * the control data.
			 */
			synchronize_sched();
			control_ops_free(ops);
		}
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		synchronize_sched();

	return 0;
}
395
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500396static void ftrace_update_pid_func(void)
397{
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400398 /* Only do something if we are tracing something */
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500399 if (ftrace_trace_function == ftrace_stub)
KOSAKI Motohiro10dd3eb2009-03-06 15:29:04 +0900400 return;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500401
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400402 update_ftrace_function();
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500403}
404
#ifdef CONFIG_FUNCTION_PROFILER
/* One profiled function: hit count and (with the graph tracer) timings. */
struct ftrace_profile {
	struct hlist_node node;		/* hash-chain linkage */
	unsigned long ip;		/* function address (hash key) */
	unsigned long counter;		/* number of hits */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long time;	/* accumulated time in the function */
	unsigned long long time_squared; /* sum of squares, for stddev */
#endif
};

/* A page worth of ftrace_profile records, chained into a list. */
struct ftrace_profile_page {
	struct ftrace_profile_page *next;
	unsigned long index;		/* next free slot in records[] */
	struct ftrace_profile records[];
};

/* Per-cpu profiler state. */
struct ftrace_profile_stat {
	atomic_t disabled;		/* recursion/NMI guard for allocation */
	struct hlist_head *hash;	/* ip -> ftrace_profile lookup */
	struct ftrace_profile_page *pages;  /* page currently being filled */
	struct ftrace_profile_page *start;  /* first page (for reset/iter) */
	struct tracer_stat stat;	/* hooks into the stat interface */
};

#define PROFILE_RECORDS_SIZE \
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE \
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;	/* log2 of hash size */
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
445
Steven Rostedt493762f2009-03-23 17:12:36 -0400446static void *
447function_stat_next(void *v, int idx)
448{
449 struct ftrace_profile *rec = v;
450 struct ftrace_profile_page *pg;
451
452 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
453
454 again:
Li Zefan0296e422009-06-26 11:15:37 +0800455 if (idx != 0)
456 rec++;
457
Steven Rostedt493762f2009-03-23 17:12:36 -0400458 if ((void *)rec >= (void *)&pg->records[pg->index]) {
459 pg = pg->next;
460 if (!pg)
461 return NULL;
462 rec = &pg->records[0];
463 if (!rec->counter)
464 goto again;
465 }
466
467 return rec;
468}
469
470static void *function_stat_start(struct tracer_stat *trace)
471{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400472 struct ftrace_profile_stat *stat =
473 container_of(trace, struct ftrace_profile_stat, stat);
474
475 if (!stat || !stat->start)
476 return NULL;
477
478 return function_stat_next(&stat->start->records[0], 0);
Steven Rostedt493762f2009-03-23 17:12:36 -0400479}
480
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400481#ifdef CONFIG_FUNCTION_GRAPH_TRACER
482/* function graph compares on total time */
483static int function_stat_cmp(void *p1, void *p2)
484{
485 struct ftrace_profile *a = p1;
486 struct ftrace_profile *b = p2;
487
488 if (a->time < b->time)
489 return -1;
490 if (a->time > b->time)
491 return 1;
492 else
493 return 0;
494}
495#else
496/* not function graph compares against hits */
Steven Rostedt493762f2009-03-23 17:12:36 -0400497static int function_stat_cmp(void *p1, void *p2)
498{
499 struct ftrace_profile *a = p1;
500 struct ftrace_profile *b = p2;
501
502 if (a->counter < b->counter)
503 return -1;
504 if (a->counter > b->counter)
505 return 1;
506 else
507 return 0;
508}
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400509#endif
Steven Rostedt493762f2009-03-23 17:12:36 -0400510
/* Emit the column headers for the per-cpu profile stat file. */
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, " Function "
		   "Hit Time Avg s^2\n"
		   " -------- "
		   "--- ---- --- ---\n");
#else
	seq_printf(m, " Function Hit\n"
		      " -------- ---\n");
#endif
	return 0;
}
524
/* Emit one row of the profile stat file for record @v. */
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, " %-30.30s %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, " ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * NOTE(review): avg was truncated by do_div() above, so
		 * counter * avg * avg slightly underestimates the mean
		 * square; confirm the rounding error is acceptable here.
		 */
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, " ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, " ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
577
Steven Rostedtcafb1682009-03-24 20:50:39 -0400578static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
Steven Rostedt493762f2009-03-23 17:12:36 -0400579{
580 struct ftrace_profile_page *pg;
581
Steven Rostedtcafb1682009-03-24 20:50:39 -0400582 pg = stat->pages = stat->start;
Steven Rostedt493762f2009-03-23 17:12:36 -0400583
584 while (pg) {
585 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
586 pg->index = 0;
587 pg = pg->next;
588 }
589
Steven Rostedtcafb1682009-03-24 20:50:39 -0400590 memset(stat->hash, 0,
Steven Rostedt493762f2009-03-23 17:12:36 -0400591 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
592}
593
Steven Rostedtcafb1682009-03-24 20:50:39 -0400594int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
Steven Rostedt493762f2009-03-23 17:12:36 -0400595{
596 struct ftrace_profile_page *pg;
Steven Rostedt318e0a72009-03-25 20:06:34 -0400597 int functions;
598 int pages;
Steven Rostedt493762f2009-03-23 17:12:36 -0400599 int i;
600
601 /* If we already allocated, do nothing */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400602 if (stat->pages)
Steven Rostedt493762f2009-03-23 17:12:36 -0400603 return 0;
604
Steven Rostedtcafb1682009-03-24 20:50:39 -0400605 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
606 if (!stat->pages)
Steven Rostedt493762f2009-03-23 17:12:36 -0400607 return -ENOMEM;
608
Steven Rostedt318e0a72009-03-25 20:06:34 -0400609#ifdef CONFIG_DYNAMIC_FTRACE
610 functions = ftrace_update_tot_cnt;
611#else
612 /*
613 * We do not know the number of functions that exist because
614 * dynamic tracing is what counts them. With past experience
615 * we have around 20K functions. That should be more than enough.
616 * It is highly unlikely we will execute every function in
617 * the kernel.
618 */
619 functions = 20000;
620#endif
621
Steven Rostedtcafb1682009-03-24 20:50:39 -0400622 pg = stat->start = stat->pages;
Steven Rostedt493762f2009-03-23 17:12:36 -0400623
Steven Rostedt318e0a72009-03-25 20:06:34 -0400624 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
625
626 for (i = 0; i < pages; i++) {
Steven Rostedt493762f2009-03-23 17:12:36 -0400627 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
Steven Rostedt493762f2009-03-23 17:12:36 -0400628 if (!pg->next)
Steven Rostedt318e0a72009-03-25 20:06:34 -0400629 goto out_free;
Steven Rostedt493762f2009-03-23 17:12:36 -0400630 pg = pg->next;
631 }
632
633 return 0;
Steven Rostedt318e0a72009-03-25 20:06:34 -0400634
635 out_free:
636 pg = stat->start;
637 while (pg) {
638 unsigned long tmp = (unsigned long)pg;
639
640 pg = pg->next;
641 free_page(tmp);
642 }
643
644 free_page((unsigned long)stat->pages);
645 stat->pages = NULL;
646 stat->start = NULL;
647
648 return -ENOMEM;
Steven Rostedt493762f2009-03-23 17:12:36 -0400649}
650
/*
 * Prepare (or reset) the profiling hash and record pages for @cpu.
 * Returns 0 on success or -ENOMEM.
 */
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* one-time computation of log2(FTRACE_PROFILE_HASH_SIZE) */
	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}
691
Steven Rostedtcafb1682009-03-24 20:50:39 -0400692static int ftrace_profile_init(void)
693{
694 int cpu;
695 int ret = 0;
696
697 for_each_online_cpu(cpu) {
698 ret = ftrace_profile_init_cpu(cpu);
699 if (ret)
700 break;
701 }
702
703 return ret;
704}
705
/* interrupts must be disabled */
/*
 * Look up the profile record for function address @ip in this cpu's
 * hash.  Returns the record, or NULL when @ip has not been seen yet.
 */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}
728
Steven Rostedtcafb1682009-03-24 20:50:39 -0400729static void ftrace_add_profile(struct ftrace_profile_stat *stat,
730 struct ftrace_profile *rec)
Steven Rostedt493762f2009-03-23 17:12:36 -0400731{
732 unsigned long key;
733
734 key = hash_long(rec->ip, ftrace_profile_bits);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400735 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
Steven Rostedt493762f2009-03-23 17:12:36 -0400736}
737
/*
 * The memory is already allocated, this simply finds a new record to use.
 * Returns the new (or concurrently-added) record, or NULL when no page
 * space remains or we lost a recursion race.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		/* current page is full; move on to the next preallocated one */
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
773
Steven Rostedt493762f2009-03-23 17:12:36 -0400774static void
775function_profile_call(unsigned long ip, unsigned long parent_ip)
776{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400777 struct ftrace_profile_stat *stat;
Steven Rostedt493762f2009-03-23 17:12:36 -0400778 struct ftrace_profile *rec;
779 unsigned long flags;
Steven Rostedt493762f2009-03-23 17:12:36 -0400780
781 if (!ftrace_profile_enabled)
782 return;
783
Steven Rostedt493762f2009-03-23 17:12:36 -0400784 local_irq_save(flags);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400785
786 stat = &__get_cpu_var(ftrace_profile_stats);
Steven Rostedt0f6ce3d2009-06-01 21:51:28 -0400787 if (!stat->hash || !ftrace_profile_enabled)
Steven Rostedtcafb1682009-03-24 20:50:39 -0400788 goto out;
789
790 rec = ftrace_find_profiled_func(stat, ip);
Steven Rostedt493762f2009-03-23 17:12:36 -0400791 if (!rec) {
Steven Rostedt318e0a72009-03-25 20:06:34 -0400792 rec = ftrace_profile_alloc(stat, ip);
Steven Rostedt493762f2009-03-23 17:12:36 -0400793 if (!rec)
794 goto out;
795 }
796
797 rec->counter++;
798 out:
799 local_irq_restore(flags);
800}
801
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400802#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Graph-tracer entry hook: account a hit for the entered function. */
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	/* NOTE(review): non-zero presumably tells the graph tracer to trace this call — confirm */
	return 1;
}
808
/*
 * Graph-tracer return hook: accumulate the time spent in the function
 * into its profile record (time and time_squared).
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	/*
	 * Without GRAPH_TIME, report self-time only: subtract the time
	 * spent in the children of this call.
	 */
	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		/* clamp at zero rather than going negative */
		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
851
/* Hook the profiler into the function graph tracer (entry + return). */
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}
857
/* Detach the profiler from the function graph tracer. */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
862#else
Paul McQuadebd38c0e2011-05-31 20:51:55 +0100863static struct ftrace_ops ftrace_profile_ops __read_mostly = {
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400864 .func = function_profile_call,
Steven Rostedt493762f2009-03-23 17:12:36 -0400865};
866
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400867static int register_ftrace_profiler(void)
868{
869 return register_ftrace_function(&ftrace_profile_ops);
870}
871
/* Detach the profiler from the plain function tracer. */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
876#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
877
Steven Rostedt493762f2009-03-23 17:12:36 -0400878static ssize_t
879ftrace_profile_write(struct file *filp, const char __user *ubuf,
880 size_t cnt, loff_t *ppos)
881{
882 unsigned long val;
Steven Rostedt493762f2009-03-23 17:12:36 -0400883 int ret;
884
Peter Huewe22fe9b52011-06-07 21:58:27 +0200885 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
886 if (ret)
Steven Rostedt493762f2009-03-23 17:12:36 -0400887 return ret;
888
889 val = !!val;
890
891 mutex_lock(&ftrace_profile_lock);
892 if (ftrace_profile_enabled ^ val) {
893 if (val) {
894 ret = ftrace_profile_init();
895 if (ret < 0) {
896 cnt = ret;
897 goto out;
898 }
899
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400900 ret = register_ftrace_profiler();
901 if (ret < 0) {
902 cnt = ret;
903 goto out;
904 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400905 ftrace_profile_enabled = 1;
906 } else {
907 ftrace_profile_enabled = 0;
Steven Rostedt0f6ce3d2009-06-01 21:51:28 -0400908 /*
909 * unregister_ftrace_profiler calls stop_machine
910 * so this acts like an synchronize_sched.
911 */
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400912 unregister_ftrace_profiler();
Steven Rostedt493762f2009-03-23 17:12:36 -0400913 }
914 }
915 out:
916 mutex_unlock(&ftrace_profile_lock);
917
Jiri Olsacf8517c2009-10-23 19:36:16 -0400918 *ppos += cnt;
Steven Rostedt493762f2009-03-23 17:12:36 -0400919
920 return cnt;
921}
922
923static ssize_t
924ftrace_profile_read(struct file *filp, char __user *ubuf,
925 size_t cnt, loff_t *ppos)
926{
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400927 char buf[64]; /* big enough to hold a number */
Steven Rostedt493762f2009-03-23 17:12:36 -0400928 int r;
929
930 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
931 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
932}
933
/* debugfs file operations for "function_profile_enabled" */
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
940
Steven Rostedtcafb1682009-03-24 20:50:39 -0400941/* used to initialize the real stat files */
942static struct tracer_stat function_stats __initdata = {
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400943 .name = "functions",
944 .stat_start = function_stat_start,
945 .stat_next = function_stat_next,
946 .stat_cmp = function_stat_cmp,
947 .stat_headers = function_stat_headers,
948 .stat_show = function_stat_show
Steven Rostedtcafb1682009-03-24 20:50:39 -0400949};
950
Steven Rostedt6ab5d662009-06-04 00:55:45 -0400951static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
Steven Rostedt493762f2009-03-23 17:12:36 -0400952{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400953 struct ftrace_profile_stat *stat;
Steven Rostedt493762f2009-03-23 17:12:36 -0400954 struct dentry *entry;
Steven Rostedtcafb1682009-03-24 20:50:39 -0400955 char *name;
Steven Rostedt493762f2009-03-23 17:12:36 -0400956 int ret;
Steven Rostedtcafb1682009-03-24 20:50:39 -0400957 int cpu;
Steven Rostedt493762f2009-03-23 17:12:36 -0400958
Steven Rostedtcafb1682009-03-24 20:50:39 -0400959 for_each_possible_cpu(cpu) {
960 stat = &per_cpu(ftrace_profile_stats, cpu);
961
962 /* allocate enough for function name + cpu number */
963 name = kmalloc(32, GFP_KERNEL);
964 if (!name) {
965 /*
966 * The files created are permanent, if something happens
967 * we still do not free memory.
968 */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400969 WARN(1,
970 "Could not allocate stat file for cpu %d\n",
971 cpu);
972 return;
973 }
974 stat->stat = function_stats;
975 snprintf(name, 32, "function%d", cpu);
976 stat->stat.name = name;
977 ret = register_stat_tracer(&stat->stat);
978 if (ret) {
979 WARN(1,
980 "Could not register function stat for cpu %d\n",
981 cpu);
982 kfree(name);
983 return;
984 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400985 }
986
987 entry = debugfs_create_file("function_profile_enabled", 0644,
988 d_tracer, NULL, &ftrace_profile_fops);
989 if (!entry)
990 pr_warning("Could not create debugfs "
991 "'function_profile_enabled' entry\n");
992}
993
994#else /* CONFIG_FUNCTION_PROFILER */
Steven Rostedt6ab5d662009-06-04 00:55:45 -0400995static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
Steven Rostedt493762f2009-03-23 17:12:36 -0400996{
997}
998#endif /* CONFIG_FUNCTION_PROFILER */
999
Ingo Molnar73d3fd92009-02-17 11:48:18 +01001000static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1001
Steven Rostedt3d083392008-05-12 21:20:42 +02001002#ifdef CONFIG_DYNAMIC_FTRACE
Ingo Molnar73d3fd92009-02-17 11:48:18 +01001003
Steven Rostedt99ecdc42008-08-15 21:40:05 -04001004#ifndef CONFIG_FTRACE_MCOUNT_RECORD
Steven Rostedtcb7be3b2008-10-23 09:33:05 -04001005# error Dynamic ftrace depends on MCOUNT_RECORD
Steven Rostedt99ecdc42008-08-15 21:40:05 -04001006#endif
1007
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001008static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1009
Steven Rostedtb6887d72009-02-17 12:32:04 -05001010struct ftrace_func_probe {
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001011 struct hlist_node node;
Steven Rostedtb6887d72009-02-17 12:32:04 -05001012 struct ftrace_probe_ops *ops;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001013 unsigned long flags;
1014 unsigned long ip;
1015 void *data;
1016 struct rcu_head rcu;
1017};
1018
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001019struct ftrace_func_entry {
1020 struct hlist_node hlist;
1021 unsigned long ip;
1022};
1023
/* Hash set of instruction pointers (filter/notrace sets of an ops). */
struct ftrace_hash {
	unsigned long		size_bits;	/* log2 of the bucket count */
	struct hlist_head	*buckets;
	unsigned long		count;		/* number of entries stored */
	struct rcu_head		rcu;		/* for call_rcu_sched() freeing */
};
1030
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001031/*
1032 * We make these constant because no one should touch them,
1033 * but they are used as the default "empty hash", to avoid allocating
1034 * it all the time. These are in a read only section such that if
1035 * anyone does try to modify it, it will cause an exception.
1036 */
1037static const struct hlist_head empty_buckets[1];
1038static const struct ftrace_hash empty_hash = {
1039 .buckets = (struct hlist_head *)empty_buckets,
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001040};
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001041#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
Steven Rostedt5072c592008-05-12 21:20:43 +02001042
Steven Rostedt2b499382011-05-03 22:49:52 -04001043static struct ftrace_ops global_ops = {
Steven Rostedtf45948e2011-05-02 12:29:25 -04001044 .func = ftrace_stub,
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001045 .notrace_hash = EMPTY_HASH,
1046 .filter_hash = EMPTY_HASH,
Steven Rostedtf45948e2011-05-02 12:29:25 -04001047};
1048
Steven Rostedt41c52c02008-05-22 11:46:33 -04001049static DEFINE_MUTEX(ftrace_regex_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001050
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001051struct ftrace_page {
1052 struct ftrace_page *next;
Steven Rostedta7900872011-12-16 16:23:44 -05001053 struct dyn_ftrace *records;
Steven Rostedt431aa3f2009-01-06 12:43:01 -05001054 int index;
Steven Rostedta7900872011-12-16 16:23:44 -05001055 int size;
David Milleraa5e5ce2008-05-13 22:06:56 -07001056};
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001057
Steven Rostedt85ae32a2011-12-16 16:30:31 -05001058static struct ftrace_page *ftrace_new_pgs;
1059
Steven Rostedta7900872011-12-16 16:23:44 -05001060#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1061#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001062
1063/* estimate from running different kernels */
1064#define NR_TO_INIT 10000
1065
1066static struct ftrace_page *ftrace_pages_start;
1067static struct ftrace_page *ftrace_pages;
1068
Steven Rostedt06a51d92011-12-19 19:07:36 -05001069static bool ftrace_hash_empty(struct ftrace_hash *hash)
1070{
1071 return !hash || !hash->count;
1072}
1073
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001074static struct ftrace_func_entry *
1075ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1076{
1077 unsigned long key;
1078 struct ftrace_func_entry *entry;
1079 struct hlist_head *hhd;
1080 struct hlist_node *n;
1081
Steven Rostedt06a51d92011-12-19 19:07:36 -05001082 if (ftrace_hash_empty(hash))
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001083 return NULL;
1084
1085 if (hash->size_bits > 0)
1086 key = hash_long(ip, hash->size_bits);
1087 else
1088 key = 0;
1089
1090 hhd = &hash->buckets[key];
1091
1092 hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1093 if (entry->ip == ip)
1094 return entry;
1095 }
1096 return NULL;
1097}
1098
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001099static void __add_hash_entry(struct ftrace_hash *hash,
1100 struct ftrace_func_entry *entry)
1101{
1102 struct hlist_head *hhd;
1103 unsigned long key;
1104
1105 if (hash->size_bits)
1106 key = hash_long(entry->ip, hash->size_bits);
1107 else
1108 key = 0;
1109
1110 hhd = &hash->buckets[key];
1111 hlist_add_head(&entry->hlist, hhd);
1112 hash->count++;
1113}
1114
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001115static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1116{
1117 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001118
1119 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1120 if (!entry)
1121 return -ENOMEM;
1122
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001123 entry->ip = ip;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001124 __add_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001125
1126 return 0;
1127}
1128
1129static void
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001130free_hash_entry(struct ftrace_hash *hash,
1131 struct ftrace_func_entry *entry)
1132{
1133 hlist_del(&entry->hlist);
1134 kfree(entry);
1135 hash->count--;
1136}
1137
1138static void
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001139remove_hash_entry(struct ftrace_hash *hash,
1140 struct ftrace_func_entry *entry)
1141{
1142 hlist_del(&entry->hlist);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001143 hash->count--;
1144}
1145
1146static void ftrace_hash_clear(struct ftrace_hash *hash)
1147{
1148 struct hlist_head *hhd;
1149 struct hlist_node *tp, *tn;
1150 struct ftrace_func_entry *entry;
1151 int size = 1 << hash->size_bits;
1152 int i;
1153
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001154 if (!hash->count)
1155 return;
1156
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001157 for (i = 0; i < size; i++) {
1158 hhd = &hash->buckets[i];
1159 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001160 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001161 }
1162 FTRACE_WARN_ON(hash->count);
1163}
1164
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001165static void free_ftrace_hash(struct ftrace_hash *hash)
1166{
1167 if (!hash || hash == EMPTY_HASH)
1168 return;
1169 ftrace_hash_clear(hash);
1170 kfree(hash->buckets);
1171 kfree(hash);
1172}
1173
Steven Rostedt07fd5512011-05-05 18:03:47 -04001174static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1175{
1176 struct ftrace_hash *hash;
1177
1178 hash = container_of(rcu, struct ftrace_hash, rcu);
1179 free_ftrace_hash(hash);
1180}
1181
/*
 * Free @hash after an RCU-sched grace period, so lockless readers
 * still traversing it can finish first.
 */
static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}
1188
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001189static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1190{
1191 struct ftrace_hash *hash;
1192 int size;
1193
1194 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1195 if (!hash)
1196 return NULL;
1197
1198 size = 1 << size_bits;
Thomas Meyer47b0edc2011-11-29 22:08:00 +01001199 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001200
1201 if (!hash->buckets) {
1202 kfree(hash);
1203 return NULL;
1204 }
1205
1206 hash->size_bits = size_bits;
1207
1208 return hash;
1209}
1210
1211static struct ftrace_hash *
1212alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1213{
1214 struct ftrace_func_entry *entry;
1215 struct ftrace_hash *new_hash;
1216 struct hlist_node *tp;
1217 int size;
1218 int ret;
1219 int i;
1220
1221 new_hash = alloc_ftrace_hash(size_bits);
1222 if (!new_hash)
1223 return NULL;
1224
1225 /* Empty hash? */
Steven Rostedt06a51d92011-12-19 19:07:36 -05001226 if (ftrace_hash_empty(hash))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001227 return new_hash;
1228
1229 size = 1 << hash->size_bits;
1230 for (i = 0; i < size; i++) {
1231 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1232 ret = add_hash_entry(new_hash, entry->ip);
1233 if (ret < 0)
1234 goto free_hash;
1235 }
1236 }
1237
1238 FTRACE_WARN_ON(new_hash->count != hash->count);
1239
1240 return new_hash;
1241
1242 free_hash:
1243 free_ftrace_hash(new_hash);
1244 return NULL;
1245}
1246
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001247static void
1248ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1249static void
1250ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1251
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001252static int
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001253ftrace_hash_move(struct ftrace_ops *ops, int enable,
1254 struct ftrace_hash **dst, struct ftrace_hash *src)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001255{
1256 struct ftrace_func_entry *entry;
1257 struct hlist_node *tp, *tn;
1258 struct hlist_head *hhd;
Steven Rostedt07fd5512011-05-05 18:03:47 -04001259 struct ftrace_hash *old_hash;
1260 struct ftrace_hash *new_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001261 unsigned long key;
1262 int size = src->count;
1263 int bits = 0;
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001264 int ret;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001265 int i;
1266
1267 /*
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001268 * Remove the current set, update the hash and add
1269 * them back.
1270 */
1271 ftrace_hash_rec_disable(ops, enable);
1272
1273 /*
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001274 * If the new source is empty, just free dst and assign it
1275 * the empty_hash.
1276 */
1277 if (!src->count) {
Steven Rostedt07fd5512011-05-05 18:03:47 -04001278 free_ftrace_hash_rcu(*dst);
1279 rcu_assign_pointer(*dst, EMPTY_HASH);
Steven Rostedtd4d34b92011-11-04 20:32:39 -04001280 /* still need to update the function records */
1281 ret = 0;
1282 goto out;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001283 }
1284
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001285 /*
1286 * Make the hash size about 1/2 the # found
1287 */
1288 for (size /= 2; size; size >>= 1)
1289 bits++;
1290
1291 /* Don't allocate too much */
1292 if (bits > FTRACE_HASH_MAX_BITS)
1293 bits = FTRACE_HASH_MAX_BITS;
1294
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001295 ret = -ENOMEM;
Steven Rostedt07fd5512011-05-05 18:03:47 -04001296 new_hash = alloc_ftrace_hash(bits);
1297 if (!new_hash)
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001298 goto out;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001299
1300 size = 1 << src->size_bits;
1301 for (i = 0; i < size; i++) {
1302 hhd = &src->buckets[i];
1303 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1304 if (bits > 0)
1305 key = hash_long(entry->ip, bits);
1306 else
1307 key = 0;
1308 remove_hash_entry(src, entry);
Steven Rostedt07fd5512011-05-05 18:03:47 -04001309 __add_hash_entry(new_hash, entry);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001310 }
1311 }
1312
Steven Rostedt07fd5512011-05-05 18:03:47 -04001313 old_hash = *dst;
1314 rcu_assign_pointer(*dst, new_hash);
1315 free_ftrace_hash_rcu(old_hash);
1316
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001317 ret = 0;
1318 out:
1319 /*
1320 * Enable regardless of ret:
1321 * On success, we enable the new hash.
1322 * On failure, we re-enable the original hash.
1323 */
1324 ftrace_hash_rec_enable(ops, enable);
1325
1326 return ret;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001327}
1328
Steven Rostedt265c8312009-02-13 12:43:56 -05001329/*
Steven Rostedtb8489142011-05-04 09:27:52 -04001330 * Test the hashes for this ops to see if we want to call
1331 * the ops->func or not.
1332 *
1333 * It's a match if the ip is in the ops->filter_hash or
1334 * the filter_hash does not exist or is empty,
1335 * AND
1336 * the ip is not in the ops->notrace_hash.
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04001337 *
1338 * This needs to be called with preemption disabled as
1339 * the hashes are freed with call_rcu_sched().
Steven Rostedtb8489142011-05-04 09:27:52 -04001340 */
1341static int
1342ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1343{
1344 struct ftrace_hash *filter_hash;
1345 struct ftrace_hash *notrace_hash;
1346 int ret;
1347
Steven Rostedtb8489142011-05-04 09:27:52 -04001348 filter_hash = rcu_dereference_raw(ops->filter_hash);
1349 notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1350
Steven Rostedt06a51d92011-12-19 19:07:36 -05001351 if ((ftrace_hash_empty(filter_hash) ||
Steven Rostedtb8489142011-05-04 09:27:52 -04001352 ftrace_lookup_ip(filter_hash, ip)) &&
Steven Rostedt06a51d92011-12-19 19:07:36 -05001353 (ftrace_hash_empty(notrace_hash) ||
Steven Rostedtb8489142011-05-04 09:27:52 -04001354 !ftrace_lookup_ip(notrace_hash, ip)))
1355 ret = 1;
1356 else
1357 ret = 0;
Steven Rostedtb8489142011-05-04 09:27:52 -04001358
1359 return ret;
1360}
1361
1362/*
Steven Rostedt265c8312009-02-13 12:43:56 -05001363 * This is a double for. Do not use 'break' to break out of the loop,
1364 * you must use a goto.
1365 */
1366#define do_for_each_ftrace_rec(pg, rec) \
1367 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1368 int _____i; \
1369 for (_____i = 0; _____i < pg->index; _____i++) { \
1370 rec = &pg->records[_____i];
1371
1372#define while_for_each_ftrace_rec() \
1373 } \
1374 }
Abhishek Sagarecea6562008-06-21 23:47:53 +05301375
Steven Rostedt5855fea2011-12-16 19:27:42 -05001376
1377static int ftrace_cmp_recs(const void *a, const void *b)
1378{
1379 const struct dyn_ftrace *reca = a;
1380 const struct dyn_ftrace *recb = b;
1381
1382 if (reca->ip > recb->ip)
1383 return 1;
1384 if (reca->ip < recb->ip)
1385 return -1;
1386 return 0;
1387}
1388
Steven Rostedtc88fd862011-08-16 09:53:39 -04001389/**
1390 * ftrace_location - return true if the ip giving is a traced location
1391 * @ip: the instruction pointer to check
1392 *
1393 * Returns 1 if @ip given is a pointer to a ftrace location.
1394 * That is, the instruction that is either a NOP or call to
1395 * the function tracer. It checks the ftrace internal tables to
1396 * determine if the address belongs or not.
1397 */
1398int ftrace_location(unsigned long ip)
1399{
1400 struct ftrace_page *pg;
1401 struct dyn_ftrace *rec;
Steven Rostedt5855fea2011-12-16 19:27:42 -05001402 struct dyn_ftrace key;
Steven Rostedtc88fd862011-08-16 09:53:39 -04001403
Steven Rostedt5855fea2011-12-16 19:27:42 -05001404 key.ip = ip;
1405
1406 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1407 rec = bsearch(&key, pg->records, pg->index,
1408 sizeof(struct dyn_ftrace),
1409 ftrace_cmp_recs);
1410 if (rec)
Steven Rostedtc88fd862011-08-16 09:53:39 -04001411 return 1;
Steven Rostedt5855fea2011-12-16 19:27:42 -05001412 }
Steven Rostedtc88fd862011-08-16 09:53:39 -04001413
1414 return 0;
1415}
1416
Steven Rostedted926f92011-05-03 13:25:24 -04001417static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1418 int filter_hash,
1419 bool inc)
1420{
1421 struct ftrace_hash *hash;
1422 struct ftrace_hash *other_hash;
1423 struct ftrace_page *pg;
1424 struct dyn_ftrace *rec;
1425 int count = 0;
1426 int all = 0;
1427
1428 /* Only update if the ops has been registered */
1429 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1430 return;
1431
1432 /*
1433 * In the filter_hash case:
1434 * If the count is zero, we update all records.
1435 * Otherwise we just update the items in the hash.
1436 *
1437 * In the notrace_hash case:
1438 * We enable the update in the hash.
1439 * As disabling notrace means enabling the tracing,
1440 * and enabling notrace means disabling, the inc variable
1441 * gets inversed.
1442 */
1443 if (filter_hash) {
1444 hash = ops->filter_hash;
1445 other_hash = ops->notrace_hash;
Steven Rostedt06a51d92011-12-19 19:07:36 -05001446 if (ftrace_hash_empty(hash))
Steven Rostedted926f92011-05-03 13:25:24 -04001447 all = 1;
1448 } else {
1449 inc = !inc;
1450 hash = ops->notrace_hash;
1451 other_hash = ops->filter_hash;
1452 /*
1453 * If the notrace hash has no items,
1454 * then there's nothing to do.
1455 */
Steven Rostedt06a51d92011-12-19 19:07:36 -05001456 if (ftrace_hash_empty(hash))
Steven Rostedted926f92011-05-03 13:25:24 -04001457 return;
1458 }
1459
1460 do_for_each_ftrace_rec(pg, rec) {
1461 int in_other_hash = 0;
1462 int in_hash = 0;
1463 int match = 0;
1464
1465 if (all) {
1466 /*
1467 * Only the filter_hash affects all records.
1468 * Update if the record is not in the notrace hash.
1469 */
Steven Rostedtb8489142011-05-04 09:27:52 -04001470 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
Steven Rostedted926f92011-05-03 13:25:24 -04001471 match = 1;
1472 } else {
Steven Rostedt06a51d92011-12-19 19:07:36 -05001473 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1474 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
Steven Rostedted926f92011-05-03 13:25:24 -04001475
1476 /*
1477 *
1478 */
1479 if (filter_hash && in_hash && !in_other_hash)
1480 match = 1;
1481 else if (!filter_hash && in_hash &&
Steven Rostedt06a51d92011-12-19 19:07:36 -05001482 (in_other_hash || ftrace_hash_empty(other_hash)))
Steven Rostedted926f92011-05-03 13:25:24 -04001483 match = 1;
1484 }
1485 if (!match)
1486 continue;
1487
1488 if (inc) {
1489 rec->flags++;
1490 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1491 return;
1492 } else {
1493 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1494 return;
1495 rec->flags--;
1496 }
1497 count++;
1498 /* Shortcut, if we handled all records, we are done. */
1499 if (!all && count == hash->count)
1500 return;
1501 } while_for_each_ftrace_rec();
1502}
1503
/* Drop @ops' reference from all matching records (see __ftrace_hash_rec_update). */
static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}
1509
/* Add @ops' reference to all matching records (see __ftrace_hash_rec_update). */
static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}
1515
Ingo Molnare309b412008-05-12 21:20:51 +02001516static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001517{
Steven Rostedta7900872011-12-16 16:23:44 -05001518 if (ftrace_pages->index == ftrace_pages->size) {
1519 /* We should have allocated enough */
1520 if (WARN_ON(!ftrace_pages->next))
1521 return NULL;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001522 ftrace_pages = ftrace_pages->next;
1523 }
1524
1525 return &ftrace_pages->records[ftrace_pages->index++];
1526}
1527
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001528static struct dyn_ftrace *
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001529ftrace_record_ip(unsigned long ip)
Steven Rostedt3d083392008-05-12 21:20:42 +02001530{
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001531 struct dyn_ftrace *rec;
Steven Rostedt3d083392008-05-12 21:20:42 +02001532
Steven Rostedtf3c7ac42008-11-14 16:21:19 -08001533 if (ftrace_disabled)
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001534 return NULL;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001535
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001536 rec = ftrace_alloc_dyn_node(ip);
1537 if (!rec)
1538 return NULL;
Steven Rostedt3d083392008-05-12 21:20:42 +02001539
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001540 rec->ip = ip;
Steven Rostedt3d083392008-05-12 21:20:42 +02001541
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001542 return rec;
Steven Rostedt3d083392008-05-12 21:20:42 +02001543}
1544
Steven Rostedt05736a42008-09-22 14:55:47 -07001545static void print_ip_ins(const char *fmt, unsigned char *p)
1546{
1547 int i;
1548
1549 printk(KERN_CONT "%s", fmt);
1550
1551 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1552 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1553}
1554
Steven Rostedtc88fd862011-08-16 09:53:39 -04001555/**
1556 * ftrace_bug - report and shutdown function tracer
1557 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1558 * @ip: The address that failed
1559 *
1560 * The arch code that enables or disables the function tracing
1561 * can call ftrace_bug() when it has detected a problem in
1562 * modifying the code. @failed should be one of either:
1563 * EFAULT - if the problem happens on reading the @ip address
1564 * EINVAL - if what is read at @ip is not what was expected
1565 * EPERM - if the problem happens on writting to the @ip address
1566 */
1567void ftrace_bug(int failed, unsigned long ip)
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001568{
1569 switch (failed) {
1570 case -EFAULT:
1571 FTRACE_WARN_ON_ONCE(1);
1572 pr_info("ftrace faulted on modifying ");
1573 print_ip_sym(ip);
1574 break;
1575 case -EINVAL:
1576 FTRACE_WARN_ON_ONCE(1);
1577 pr_info("ftrace failed to modify ");
1578 print_ip_sym(ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001579 print_ip_ins(" actual: ", (unsigned char *)ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001580 printk(KERN_CONT "\n");
1581 break;
1582 case -EPERM:
1583 FTRACE_WARN_ON_ONCE(1);
1584 pr_info("ftrace faulted on writing ");
1585 print_ip_sym(ip);
1586 break;
1587 default:
1588 FTRACE_WARN_ON_ONCE(1);
1589 pr_info("ftrace faulted on unknown error ");
1590 print_ip_sym(ip);
1591 }
1592}
1593
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001594
Masami Hiramatsu2cfa1972010-02-02 16:49:11 -05001595/* Return 1 if the address range is reserved for ftrace */
1596int ftrace_text_reserved(void *start, void *end)
1597{
1598 struct dyn_ftrace *rec;
1599 struct ftrace_page *pg;
1600
1601 do_for_each_ftrace_rec(pg, rec) {
1602 if (rec->ip <= (unsigned long)end &&
1603 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1604 return 1;
1605 } while_for_each_ftrace_rec();
1606 return 0;
1607}
1608
Steven Rostedtc88fd862011-08-16 09:53:39 -04001609static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
Steven Rostedt5072c592008-05-12 21:20:43 +02001610{
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001611 unsigned long flag = 0UL;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001612
Steven Rostedt982c3502008-11-15 16:31:41 -05001613 /*
Jiri Olsa30fb6aa2011-12-05 18:22:48 +01001614 * If we are updating calls:
Steven Rostedt982c3502008-11-15 16:31:41 -05001615 *
Steven Rostedted926f92011-05-03 13:25:24 -04001616 * If the record has a ref count, then we need to enable it
1617 * because someone is using it.
Steven Rostedt982c3502008-11-15 16:31:41 -05001618 *
Steven Rostedted926f92011-05-03 13:25:24 -04001619 * Otherwise we make sure its disabled.
1620 *
Jiri Olsa30fb6aa2011-12-05 18:22:48 +01001621 * If we are disabling calls, then disable all records that
Steven Rostedted926f92011-05-03 13:25:24 -04001622 * are enabled.
Steven Rostedt982c3502008-11-15 16:31:41 -05001623 */
Steven Rostedtc88fd862011-08-16 09:53:39 -04001624 if (enable && (rec->flags & ~FTRACE_FL_MASK))
Steven Rostedted926f92011-05-03 13:25:24 -04001625 flag = FTRACE_FL_ENABLED;
Steven Rostedt5072c592008-05-12 21:20:43 +02001626
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001627 /* If the state of this record hasn't changed, then do nothing */
1628 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
Steven Rostedtc88fd862011-08-16 09:53:39 -04001629 return FTRACE_UPDATE_IGNORE;
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001630
1631 if (flag) {
Steven Rostedtc88fd862011-08-16 09:53:39 -04001632 if (update)
1633 rec->flags |= FTRACE_FL_ENABLED;
1634 return FTRACE_UPDATE_MAKE_CALL;
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001635 }
1636
Steven Rostedtc88fd862011-08-16 09:53:39 -04001637 if (update)
1638 rec->flags &= ~FTRACE_FL_ENABLED;
1639
1640 return FTRACE_UPDATE_MAKE_NOP;
1641}
1642
/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 *
 * Returns one of FTRACE_UPDATE_IGNORE, FTRACE_UPDATE_MAKE_CALL or
 * FTRACE_UPDATE_MAKE_NOP, and adjusts rec->flags to match the new
 * state (see ftrace_check_record()).
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}
1655
/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 *
 * Same decision logic as ftrace_update_record(), but rec->flags is
 * left untouched (the "update" argument is 0).
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}
1669
1670static int
1671__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1672{
1673 unsigned long ftrace_addr;
1674 int ret;
1675
1676 ftrace_addr = (unsigned long)FTRACE_ADDR;
1677
1678 ret = ftrace_update_record(rec, enable);
1679
1680 switch (ret) {
1681 case FTRACE_UPDATE_IGNORE:
1682 return 0;
1683
1684 case FTRACE_UPDATE_MAKE_CALL:
1685 return ftrace_make_call(rec, ftrace_addr);
1686
1687 case FTRACE_UPDATE_MAKE_NOP:
1688 return ftrace_make_nop(NULL, rec, ftrace_addr);
1689 }
1690
1691 return -1; /* unknow ftrace bug */
Steven Rostedt5072c592008-05-12 21:20:43 +02001692}
1693
Jiri Olsa30fb6aa2011-12-05 18:22:48 +01001694static void ftrace_replace_code(int update)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001695{
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001696 struct dyn_ftrace *rec;
1697 struct ftrace_page *pg;
Steven Rostedt6a24a242009-02-17 11:20:26 -05001698 int failed;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001699
Steven Rostedt45a4a232011-04-21 23:16:46 -04001700 if (unlikely(ftrace_disabled))
1701 return;
1702
Steven Rostedt265c8312009-02-13 12:43:56 -05001703 do_for_each_ftrace_rec(pg, rec) {
Jiri Olsa30fb6aa2011-12-05 18:22:48 +01001704 failed = __ftrace_replace_code(rec, update);
Zhaoleifa9d13c2009-03-13 17:16:34 +08001705 if (failed) {
Steven Rostedt3279ba32009-10-07 16:57:56 -04001706 ftrace_bug(failed, rec->ip);
1707 /* Stop processing */
1708 return;
Steven Rostedt265c8312009-02-13 12:43:56 -05001709 }
1710 } while_for_each_ftrace_rec();
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001711}
1712
/*
 * Cursor for walking every dyn_ftrace record across the linked
 * list of ftrace_page blocks (see ftrace_rec_iter_start()).
 */
struct ftrace_rec_iter {
	struct ftrace_page	*pg;	/* current page of records */
	int			index;	/* index into pg->records */
};
1717
1718/**
1719 * ftrace_rec_iter_start, start up iterating over traced functions
1720 *
1721 * Returns an iterator handle that is used to iterate over all
1722 * the records that represent address locations where functions
1723 * are traced.
1724 *
1725 * May return NULL if no records are available.
1726 */
1727struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1728{
1729 /*
1730 * We only use a single iterator.
1731 * Protected by the ftrace_lock mutex.
1732 */
1733 static struct ftrace_rec_iter ftrace_rec_iter;
1734 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1735
1736 iter->pg = ftrace_pages_start;
1737 iter->index = 0;
1738
1739 /* Could have empty pages */
1740 while (iter->pg && !iter->pg->index)
1741 iter->pg = iter->pg->next;
1742
1743 if (!iter->pg)
1744 return NULL;
1745
1746 return iter;
1747}
1748
1749/**
1750 * ftrace_rec_iter_next, get the next record to process.
1751 * @iter: The handle to the iterator.
1752 *
1753 * Returns the next iterator after the given iterator @iter.
1754 */
1755struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1756{
1757 iter->index++;
1758
1759 if (iter->index >= iter->pg->index) {
1760 iter->pg = iter->pg->next;
1761 iter->index = 0;
1762
1763 /* Could have empty pages */
1764 while (iter->pg && !iter->pg->index)
1765 iter->pg = iter->pg->next;
1766 }
1767
1768 if (!iter->pg)
1769 return NULL;
1770
1771 return iter;
1772}
1773
/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.  Only valid
 * while @iter is non-NULL (i.e. between ftrace_rec_iter_start()
 * and the NULL return of ftrace_rec_iter_next()).
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
1784
Abhishek Sagar492a7ea2008-05-25 00:10:04 +05301785static int
Steven Rostedt31e88902008-11-14 16:21:19 -08001786ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001787{
1788 unsigned long ip;
Steven Rostedt593eb8a2008-10-23 09:32:59 -04001789 int ret;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001790
1791 ip = rec->ip;
1792
Steven Rostedt45a4a232011-04-21 23:16:46 -04001793 if (unlikely(ftrace_disabled))
1794 return 0;
1795
Shaohua Li25aac9d2009-01-09 11:29:40 +08001796 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
Steven Rostedt593eb8a2008-10-23 09:32:59 -04001797 if (ret) {
Steven Rostedt31e88902008-11-14 16:21:19 -08001798 ftrace_bug(ret, ip);
Abhishek Sagar492a7ea2008-05-25 00:10:04 +05301799 return 0;
Steven Rostedt37ad5082008-05-12 21:20:48 +02001800 }
Abhishek Sagar492a7ea2008-05-25 00:10:04 +05301801 return 1;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001802}
1803
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 * The default (weak) implementation does nothing and reports success.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}
1812
/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 * The default (weak) implementation does nothing and reports success.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
1821
/*
 * Carry out the set of code modifications described by the
 * FTRACE_* bits in *(int *)data.  Signature matches the
 * stop_machine() callback type (see ftrace_run_stop_machine()).
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	/* Enable or disable all the mcount call sites */
	if (*command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	/* Swap in the current trace callback */
	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	/* Start or stop the function-graph return hook */
	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}
1841
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, it
 * can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
1853
/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.  The default (weak)
 * implementation simply falls back to the stop machine method.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}
1865
/*
 * Perform the code modification described by @command, bracketed
 * by the arch prepare/post_process hooks, with the function tracer
 * suppressed (function_trace_stop) while the text is patched.
 */
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;
	/*
	 * Do not call function tracer while we update the code.
	 * We are in stop machine.
	 */
	function_trace_stop++;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/*
	 * For archs that call ftrace_test_stop_func(), we must
	 * wait till after we update all the function callers
	 * before we update the callback. This keeps different
	 * ops that record different functions from corrupting
	 * each other.
	 */
	__ftrace_trace_function = __ftrace_trace_function_delay;
#endif
	function_trace_stop--;

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
1903
/* Trace callback in effect when the code was last updated; compared
 * against ftrace_trace_function to detect a needed callback switch. */
static ftrace_func_t saved_ftrace_func;
/* Nesting count of ftrace_startup() calls minus ftrace_shutdown() calls */
static int ftrace_start_up;
/* Nesting count for ops that share the global filter hashes */
static int global_start_up;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001907
1908static void ftrace_startup_enable(int command)
1909{
1910 if (saved_ftrace_func != ftrace_trace_function) {
1911 saved_ftrace_func = ftrace_trace_function;
1912 command |= FTRACE_UPDATE_TRACE_FUNC;
1913 }
1914
1915 if (!command || !ftrace_enabled)
1916 return;
1917
1918 ftrace_run_update_code(command);
1919}
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001920
/*
 * Enable tracing for @ops: bump the start-up counters, account the
 * ops' filter hash in the per-record ref counts (unless another
 * global ops already did), and patch the call sites.
 * Returns 0 on success or -ENODEV if ftrace is disabled.
 */
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
		ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	return 0;
}
1948
/*
 * Disable tracing for @ops: the inverse of ftrace_startup().
 * Drops the start-up counters, removes the ops' functions from the
 * record ref counts (unless a global ops still has users), and
 * patches the call sites back.
 */
static void ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;

	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);

	/* Keep the global ops flagged enabled while it has users */
	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
1993
/* Re-enable call sites when tracing is switched back on via sysctl */
static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}
2005
/* Disable all call sites when tracing is switched off via sysctl */
static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}
2015
/* Time (ftrace_now() units) spent in the last ftrace_update_code() run */
static cycle_t ftrace_update_time;
/* Number of records converted in the last ftrace_update_code() run */
static unsigned long ftrace_update_cnt;
/* Running total of records converted across all updates */
unsigned long ftrace_update_tot_cnt;
2019
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002020static int ops_traces_mod(struct ftrace_ops *ops)
2021{
2022 struct ftrace_hash *hash;
2023
2024 hash = ops->filter_hash;
Steven Rostedt06a51d92011-12-19 19:07:36 -05002025 return ftrace_hash_empty(hash);
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002026}
2027
/*
 * Convert all newly discovered mcount call sites (the pages queued
 * on ftrace_new_pgs) into NOPs, and — when tracing is active and an
 * enabled ops traces all functions — enable them right away.
 * @mod is the module being loaded, or NULL at boot.
 * Returns 0 on success, -1 if ftrace was disabled part way through.
 */
static int ftrace_update_code(struct module *mod)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long ref = 0;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
			    ops_traces_mod(ops))
				ref++;
		}
	}

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	for (pg = ftrace_new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			/* Seed the ref count with the all-tracing ops count */
			p->flags = ref;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			ftrace_update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && ref) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p->ip);
			}
		}
	}

	/* All queued pages have been processed */
	ftrace_new_pgs = NULL;

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
2100
/*
 * Allocate pg->records with room for up to @count dyn_ftrace
 * entries.  The allocation is in whole pages; if a large block
 * cannot be had, progressively smaller orders are tried.
 * Returns the number of entries that fit (at most @count, but the
 * backing store pg->size may hold more), or a negative errno.
 */
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	/* Report no more entries than the caller asked for */
	if (cnt > count)
		cnt = count;

	return cnt;
}
2137
2138static struct ftrace_page *
2139ftrace_allocate_pages(unsigned long num_to_init)
2140{
2141 struct ftrace_page *start_pg;
2142 struct ftrace_page *pg;
2143 int order;
2144 int cnt;
2145
2146 if (!num_to_init)
2147 return 0;
2148
2149 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2150 if (!pg)
2151 return NULL;
2152
2153 /*
2154 * Try to allocate as much as possible in one continues
2155 * location that fills in all of the space. We want to
2156 * waste as little space as possible.
2157 */
2158 for (;;) {
2159 cnt = ftrace_allocate_records(pg, num_to_init);
2160 if (cnt < 0)
2161 goto free_pages;
2162
2163 num_to_init -= cnt;
2164 if (!num_to_init)
2165 break;
2166
2167 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2168 if (!pg->next)
2169 goto free_pages;
2170
2171 pg = pg->next;
2172 }
2173
2174 return start_pg;
2175
2176 free_pages:
2177 while (start_pg) {
2178 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2179 free_pages((unsigned long)pg->records, order);
2180 start_pg = pg->next;
2181 kfree(pg);
2182 pg = start_pg;
2183 }
2184 pr_info("ftrace: FAILED to allocate memory for functions\n");
2185 return NULL;
2186}
2187
/*
 * Boot-time sanity check and report for the dyn_ftrace tables.
 * @num_to_init is the number of mcount call sites found; returns 0,
 * or -1 when there is nothing to trace.
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	int cnt;

	if (!num_to_init) {
		pr_info("ftrace: No functions to be traced?\n");
		return -1;
	}

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	return 0;
}
2203
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/*
 * Shared state for the seq_file walks over dyn_ftrace records and
 * function probes (t_start/t_next/t_show and the t_hash_* variants).
 */
struct ftrace_iterator {
	loff_t				pos;		/* current seq_file position */
	loff_t				func_pos;	/* position of the last function record */
	struct ftrace_page		*pg;		/* current page of records */
	struct dyn_ftrace		*func;		/* record for t_show() to print */
	struct ftrace_func_probe	*probe;		/* probe for t_hash_show() to print */
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;		/* ops whose hashes filter the walk */
	int				hidx;		/* ftrace_func_hash bucket index */
	int				idx;		/* index into pg->records */
	unsigned			flags;		/* FTRACE_ITER_* bits */
};
2219
/*
 * Advance the probe iterator to the next ftrace_func_probe entry
 * in ftrace_func_hash, moving across buckets as each drains.
 * Returns @iter, or NULL once all buckets are exhausted.
 */
static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	/* Resume from the current probe, if we have one */
	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			/* Bucket drained: move to the next one */
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}
2261
/*
 * Position the probe iterator for seq position *pos.  Probes are
 * listed after all function records, so entries are produced only
 * once *pos reaches iter->func_pos, and only for iterators opened
 * with FTRACE_ITER_DO_HASH.
 */
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}
2288
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002289static int
2290t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002291{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002292 struct ftrace_func_probe *rec;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002293
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002294 rec = iter->probe;
2295 if (WARN_ON_ONCE(!rec))
2296 return -EIO;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002297
Steven Rostedt809dcf22009-02-16 23:06:01 -05002298 if (rec->ops->print)
2299 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2300
Steven Rostedtb375a112009-09-17 00:05:58 -04002301 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002302
2303 if (rec->data)
2304 seq_printf(m, ":%p", rec->data);
2305 seq_putc(m, '\n');
2306
2307 return 0;
2308}
2309
/*
 * seq_file ->next for the function record files.  Walks the record
 * pages, skipping entries filtered out by the iterator's
 * filter/notrace/enabled criteria, then hands off to the probe
 * hash walk once the records run out.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	/* Already in the probe-hash phase of the walk */
	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/* Skip records the current view does not include */
		if (((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & ~FTRACE_FL_MASK))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}
2359
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002360static void reset_iter_read(struct ftrace_iterator *iter)
2361{
2362 iter->pos = 0;
2363 iter->func_pos = 0;
2364 iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
Steven Rostedt5072c592008-05-12 21:20:43 +02002365}
2366
/*
 * seq_file ->start for the function record files.  Takes
 * ftrace_lock (released in t_stop()), handles seek-back by
 * resetting the iterator, short-cuts to a single "all functions"
 * line when the filter hash is empty, and otherwise re-walks from
 * the first page up to *pos.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER &&
	    ftrace_hash_empty(ops->filter_hash)) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p)
		return t_hash_start(m, pos);

	return iter;
}
2421
/* seq_file ->stop: release ftrace_lock taken in t_start() */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
2426
2427static int t_show(struct seq_file *m, void *v)
2428{
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002429 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002430 struct dyn_ftrace *rec;
Steven Rostedt5072c592008-05-12 21:20:43 +02002431
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002432 if (iter->flags & FTRACE_ITER_HASH)
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002433 return t_hash_show(m, iter);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002434
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002435 if (iter->flags & FTRACE_ITER_PRINTALL) {
2436 seq_printf(m, "#### all functions enabled ####\n");
2437 return 0;
2438 }
2439
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002440 rec = iter->func;
2441
Steven Rostedt5072c592008-05-12 21:20:43 +02002442 if (!rec)
2443 return 0;
2444
Steven Rostedt647bcd02011-05-03 14:39:21 -04002445 seq_printf(m, "%ps", (void *)rec->ip);
2446 if (iter->flags & FTRACE_ITER_ENABLED)
2447 seq_printf(m, " (%ld)",
2448 rec->flags & ~FTRACE_FL_MASK);
2449 seq_printf(m, "\n");
Steven Rostedt5072c592008-05-12 21:20:43 +02002450
2451 return 0;
2452}
2453
/* seq_file callbacks shared by the avail/enabled/filter/notrace files */
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
2460
Ingo Molnare309b412008-05-12 21:20:51 +02002461static int
Steven Rostedt5072c592008-05-12 21:20:43 +02002462ftrace_avail_open(struct inode *inode, struct file *file)
2463{
2464 struct ftrace_iterator *iter;
2465 int ret;
2466
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002467 if (unlikely(ftrace_disabled))
2468 return -ENODEV;
2469
Steven Rostedt5072c592008-05-12 21:20:43 +02002470 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2471 if (!iter)
2472 return -ENOMEM;
2473
2474 iter->pg = ftrace_pages_start;
Steven Rostedtfc13cb02011-12-19 14:41:25 -05002475 iter->ops = &global_ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02002476
2477 ret = seq_open(file, &show_ftrace_seq_ops);
2478 if (!ret) {
2479 struct seq_file *m = file->private_data;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002480
Steven Rostedt5072c592008-05-12 21:20:43 +02002481 m->private = iter;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002482 } else {
Steven Rostedt5072c592008-05-12 21:20:43 +02002483 kfree(iter);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002484 }
Steven Rostedt5072c592008-05-12 21:20:43 +02002485
2486 return ret;
2487}
2488
Steven Rostedt647bcd02011-05-03 14:39:21 -04002489static int
2490ftrace_enabled_open(struct inode *inode, struct file *file)
2491{
2492 struct ftrace_iterator *iter;
2493 int ret;
2494
2495 if (unlikely(ftrace_disabled))
2496 return -ENODEV;
2497
2498 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2499 if (!iter)
2500 return -ENOMEM;
2501
2502 iter->pg = ftrace_pages_start;
2503 iter->flags = FTRACE_ITER_ENABLED;
Steven Rostedtfc13cb02011-12-19 14:41:25 -05002504 iter->ops = &global_ops;
Steven Rostedt647bcd02011-05-03 14:39:21 -04002505
2506 ret = seq_open(file, &show_ftrace_seq_ops);
2507 if (!ret) {
2508 struct seq_file *m = file->private_data;
2509
2510 m->private = iter;
2511 } else {
2512 kfree(iter);
2513 }
2514
2515 return ret;
2516}
2517
/* Empty @hash; takes ftrace_lock around the clear. */
static void ftrace_filter_reset(struct ftrace_hash *hash)
{
	mutex_lock(&ftrace_lock);
	ftrace_hash_clear(hash);
	mutex_unlock(&ftrace_lock);
}
2524
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * ftrace_regex_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	/* @flag selects which of the two hashes this file edits */
	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->notrace_hash;
	else
		hash = ops->filter_hash;

	iter->ops = ops;
	iter->flags = flag;

	if (file->f_mode & FMODE_WRITE) {
		/*
		 * Writers work on a private copy of the hash; the copy
		 * is made under ftrace_lock so the source hash is stable.
		 */
		mutex_lock(&ftrace_lock);
		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
		mutex_unlock(&ftrace_lock);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			return -ENOMEM;
		}
	}

	mutex_lock(&ftrace_regex_lock);

	/* opening with O_TRUNC for write starts from an empty filter */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(iter->hash);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
2606
/* open handler for "set_ftrace_filter" */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops,
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
}
2614
/* open handler for "set_ftrace_notrace" */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}
2621
/*
 * lseek for the filter files.  Read opens are seq_files, so defer
 * to seq_lseek(); write-only opens have nothing to seek in, so the
 * position is pinned to 1.
 * NOTE(review): the write-only branch ignores @offset and @origin
 * entirely -- presumably intentional since there is no readable
 * content, but worth confirming against callers that seek.
 */
loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
2634
Steven Rostedt64e7c442009-02-13 17:08:48 -05002635static int ftrace_match(char *str, char *regex, int len, int type)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002636{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002637 int matched = 0;
Li Zefan751e9982010-01-14 10:53:02 +08002638 int slen;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002639
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002640 switch (type) {
2641 case MATCH_FULL:
2642 if (strcmp(str, regex) == 0)
2643 matched = 1;
2644 break;
2645 case MATCH_FRONT_ONLY:
2646 if (strncmp(str, regex, len) == 0)
2647 matched = 1;
2648 break;
2649 case MATCH_MIDDLE_ONLY:
2650 if (strstr(str, regex))
2651 matched = 1;
2652 break;
2653 case MATCH_END_ONLY:
Li Zefan751e9982010-01-14 10:53:02 +08002654 slen = strlen(str);
2655 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002656 matched = 1;
2657 break;
2658 }
2659
2660 return matched;
2661}
2662
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002663static int
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002664enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
Steven Rostedt996e87b2011-04-26 16:11:03 -04002665{
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002666 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002667 int ret = 0;
2668
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002669 entry = ftrace_lookup_ip(hash, rec->ip);
2670 if (not) {
2671 /* Do nothing if it doesn't exist */
2672 if (!entry)
2673 return 0;
2674
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002675 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002676 } else {
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002677 /* Do nothing if it exists */
2678 if (entry)
2679 return 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002680
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002681 ret = add_hash_entry(hash, rec->ip);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002682 }
2683 return ret;
Steven Rostedt996e87b2011-04-26 16:11:03 -04002684}
2685
Steven Rostedt64e7c442009-02-13 17:08:48 -05002686static int
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002687ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2688 char *regex, int len, int type)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002689{
2690 char str[KSYM_SYMBOL_LEN];
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002691 char *modname;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002692
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002693 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2694
2695 if (mod) {
2696 /* module lookup requires matching the module */
2697 if (!modname || strcmp(modname, mod))
2698 return 0;
2699
2700 /* blank search means to match all funcs in the mod */
2701 if (!len)
2702 return 1;
2703 }
2704
Steven Rostedt64e7c442009-02-13 17:08:48 -05002705 return ftrace_match(str, regex, len, type);
2706}
2707
/*
 * Walk all dyn_ftrace records and enter every one matching @buff
 * (restricted to module @mod when non-NULL) into @hash; @not
 * inverts the operation to removal.  Returns 1 if anything
 * matched, 0 if nothing did, or a negative error from
 * enter_record().
 */
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	if (len) {
		/* may rewrite @not and point @search past a leading '*' */
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}
2745
/* match @buff against every function, with no module restriction */
static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}
2751
/*
 * Match @buff against the functions of module @mod.  An empty
 * pattern or "*" selects every function of the module; "!" or
 * "!*" removes every function of the module instead.
 */
static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int not = 0;

	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		/* 'dont filter this module': negate a blank match */
		not = 1;
		buff[0] = 0;
	} else if (strcmp(buff, "*") == 0) {
		/* blank and '*' mean the same: everything in the module */
		buff[0] = 0;
	}

	return match_records(hash, buff, strlen(buff), mod, not);
}
2769
Steven Rostedtf6180772009-02-14 00:40:25 -05002770/*
2771 * We register the module command as a template to show others how
2772 * to register the a command as well.
2773 */
2774
2775static int
Steven Rostedt43dd61c2011-07-07 11:09:22 -04002776ftrace_mod_callback(struct ftrace_hash *hash,
2777 char *func, char *cmd, char *param, int enable)
Steven Rostedtf6180772009-02-14 00:40:25 -05002778{
2779 char *mod;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002780 int ret = -EINVAL;
Steven Rostedtf6180772009-02-14 00:40:25 -05002781
2782 /*
2783 * cmd == 'mod' because we only registered this func
2784 * for the 'mod' ftrace_func_command.
2785 * But if you register one func with multiple commands,
2786 * you can tell which command was used by the cmd
2787 * parameter.
2788 */
2789
2790 /* we must have a module name */
2791 if (!param)
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002792 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002793
2794 mod = strsep(&param, ":");
2795 if (!strlen(mod))
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002796 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002797
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002798 ret = ftrace_match_module_records(hash, func, mod);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002799 if (!ret)
2800 ret = -EINVAL;
2801 if (ret < 0)
2802 return ret;
2803
2804 return 0;
Steven Rostedtf6180772009-02-14 00:40:25 -05002805}
2806
/* the ":mod:<module>" filter command */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};
2811
/* register the "mod" command at boot time */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
2817
/*
 * ftrace handler invoked on every traced function while probes are
 * registered: look up @ip in ftrace_func_hash and run the handler
 * of every probe armed on that address.
 */
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	/* fast exit for the common no-probe-on-this-ip case */
	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}
2845
/* single shared ftrace_ops that dispatches to all armed probes */
static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func = function_trace_probe_call,
};
2850
/* nonzero once trace_probe_ops has been registered with ftrace */
static int ftrace_probe_registered;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002852
/*
 * Register and start trace_probe_ops once the first probe entry
 * exists in ftrace_func_hash.  No-op when already registered or
 * when every bucket is still empty.
 */
static void __enable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = __register_ftrace_function(&trace_probe_ops);
	if (!ret)
		ret = ftrace_startup(&trace_probe_ops, 0);

	/*
	 * NOTE(review): the flag is set even when registration or
	 * startup above failed -- ret is never checked here.  Looks
	 * like a silently-ignored error path; confirm intent.
	 */
	ftrace_probe_registered = 1;
}
2876
/*
 * Shut down and unregister trace_probe_ops once the last probe
 * entry is gone; bails out early if any hash bucket is still
 * populated.
 */
static void __disable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (!ftrace_probe_registered)
		return;

	/* any remaining entry keeps the ops registered */
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ret = __unregister_ftrace_function(&trace_probe_ops);
	if (!ret)
		ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}
2898
2899
2900static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2901{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002902 struct ftrace_func_probe *entry =
2903 container_of(rhp, struct ftrace_func_probe, rcu);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002904
2905 if (entry->ops->free)
2906 entry->ops->free(&entry->data);
2907 kfree(entry);
2908}
2909
2910
/*
 * Arm @ops on every function matching @glob: allocate one
 * ftrace_func_probe per matched function, hash it by ip, and make
 * sure the shared probe ops is registered.  Returns the number of
 * functions armed, or a negative errno ('!' globs are rejected).
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
2979
/* match criteria for __unregister_ftrace_function_probe() */
enum {
	PROBE_TEST_FUNC		= 1,	/* entry->ops must equal @ops */
	PROBE_TEST_DATA		= 2	/* entry->data must equal @data */
};
2984
2985static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002986__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002987 void *data, int flags)
2988{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002989 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002990 struct hlist_node *n, *tmp;
2991 char str[KSYM_SYMBOL_LEN];
2992 int type = MATCH_FULL;
2993 int i, len = 0;
2994 char *search;
2995
Atsushi Tsujib36461d2009-09-15 19:06:30 +09002996 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
Steven Rostedt59df055f2009-02-14 15:29:06 -05002997 glob = NULL;
Atsushi Tsujib36461d2009-09-15 19:06:30 +09002998 else if (glob) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05002999 int not;
3000
Frederic Weisbecker3f6fe062009-09-24 21:31:51 +02003001 type = filter_parse_regex(glob, strlen(glob), &search, &not);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003002 len = strlen(search);
3003
Steven Rostedtb6887d72009-02-17 12:32:04 -05003004 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05003005 if (WARN_ON(not))
3006 return;
3007 }
3008
3009 mutex_lock(&ftrace_lock);
3010 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3011 struct hlist_head *hhd = &ftrace_func_hash[i];
3012
3013 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
3014
3015 /* break up if statements for readability */
Steven Rostedtb6887d72009-02-17 12:32:04 -05003016 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003017 continue;
3018
Steven Rostedtb6887d72009-02-17 12:32:04 -05003019 if ((flags & PROBE_TEST_DATA) && entry->data != data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003020 continue;
3021
3022 /* do this last, since it is the most expensive */
3023 if (glob) {
3024 kallsyms_lookup(entry->ip, NULL, NULL,
3025 NULL, str);
3026 if (!ftrace_match(str, glob, len, type))
3027 continue;
3028 }
3029
3030 hlist_del(&entry->node);
3031 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
3032 }
3033 }
Steven Rostedtb6887d72009-02-17 12:32:04 -05003034 __disable_ftrace_function_probe();
Steven Rostedt59df055f2009-02-14 15:29:06 -05003035 mutex_unlock(&ftrace_lock);
3036}
3037
/* remove probes matching @glob where both @ops and @data match */
void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}
3045
/* remove probes matching @glob and @ops, regardless of their data */
void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}
3051
/* remove every probe matching @glob, ignoring ops and data */
void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
3056
/* registered filter-file commands (e.g. "mod"); guarded by ftrace_cmd_mutex */
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
3059
3060int register_ftrace_command(struct ftrace_func_command *cmd)
3061{
3062 struct ftrace_func_command *p;
3063 int ret = 0;
3064
3065 mutex_lock(&ftrace_cmd_mutex);
3066 list_for_each_entry(p, &ftrace_commands, list) {
3067 if (strcmp(cmd->name, p->name) == 0) {
3068 ret = -EBUSY;
3069 goto out_unlock;
3070 }
3071 }
3072 list_add(&cmd->list, &ftrace_commands);
3073 out_unlock:
3074 mutex_unlock(&ftrace_cmd_mutex);
3075
3076 return ret;
3077}
3078
3079int unregister_ftrace_command(struct ftrace_func_command *cmd)
3080{
3081 struct ftrace_func_command *p, *n;
3082 int ret = -ENODEV;
3083
3084 mutex_lock(&ftrace_cmd_mutex);
3085 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3086 if (strcmp(cmd->name, p->name) == 0) {
3087 ret = 0;
3088 list_del_init(&p->list);
3089 goto out_unlock;
3090 }
3091 }
3092 out_unlock:
3093 mutex_unlock(&ftrace_cmd_mutex);
3094
3095 return ret;
3096}
3097
/*
 * Process one filter entry of the form "func[:command[:param]]".
 * A bare pattern updates @hash directly via ftrace_match_records();
 * otherwise the named command registered through
 * register_ftrace_command() is invoked.  Returns 0 on success or a
 * negative errno (-EINVAL for no match / unknown command).
 */
static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		/* no ':' present -- plain function pattern */
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
3132
/*
 * Common write handler for the filter/notrace files.  User input
 * is accumulated in the iterator's trace_parser; each completed
 * token is applied to the iterator's private hash via
 * ftrace_process_regex().  Returns bytes consumed or an error.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	ret = -ENODEV;
	if (unlikely(ftrace_disabled))
		goto out_unlock;

	/* read opens are seq_files: the iterator hangs off m->private */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* only act once a full token (not a continuation) is buffered */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret)
			goto out_unlock;
	}

	ret = read;
out_unlock:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
3174
/* write handler for filter files (enable == 1 -> filter hash) */
ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
3181
/* write handler for notrace files (enable == 0 -> notrace hash) */
ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
3188
/*
 * Rebuild one of @ops' hashes from @buf.  @enable selects the
 * filter hash (non-zero) or the notrace hash; @reset empties the
 * hash before matching.  Works on a private copy and swaps it in
 * under ftrace_lock with ftrace_hash_move(), re-patching the live
 * code if the ops is enabled and tracing is on.
 */
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *hash;
	int ret;

	/* All global ops uses the global ops filters */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
		ops = &global_ops;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	if (enable)
		orig_hash = &ops->filter_hash;
	else
		orig_hash = &ops->notrace_hash;

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(hash);
	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	/* push the new filter into the live call sites */
	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
	    && ftrace_enabled)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);

	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ftrace_regex_lock);

	/* on success the old hash is in @hash (moved); free it either way */
	free_ftrace_hash(hash);
	return ret;
}
3235
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 *
 * Returns the result of ftrace_set_regex() (0 on success, negative errno
 * on failure).
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset)
{
        return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003252
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 *
 * Returns the result of ftrace_set_regex() (0 on success, negative errno
 * on failure).
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
{
        return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3285
/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
Steven Rostedt77a2b372008-05-12 21:20:45 +02003302
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
/* Raw parameter strings, consumed once by set_ftrace_early_filters() */
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3309
3310static int __init set_ftrace_notrace(char *str)
3311{
3312 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3313 return 1;
3314}
3315__setup("ftrace_notrace=", set_ftrace_notrace);
3316
3317static int __init set_ftrace_filter(char *str)
3318{
3319 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3320 return 1;
3321}
3322__setup("ftrace_filter=", set_ftrace_filter);
3323
Stefan Assmann369bc182009-10-12 22:17:21 +02003324#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Boot-time buffer for the "ftrace_graph_filter=" parameter */
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);

/* "ftrace_graph_filter=" boot parameter: stash for set_ftrace_early_graph() */
static int __init set_graph_function(char *str)
{
        /* strlcpy() guarantees NUL termination even on truncation */
        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
        return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
3334
3335static void __init set_ftrace_early_graph(char *buf)
3336{
3337 int ret;
3338 char *func;
3339
3340 while (buf) {
3341 func = strsep(&buf, ",");
3342 /* we allow only one expression at a time */
3343 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3344 func);
3345 if (ret)
3346 printk(KERN_DEBUG "ftrace: function %s not "
3347 "traceable\n", func);
3348 }
3349}
3350#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3351
Steven Rostedt2a85a372011-12-19 21:57:44 -05003352void __init
3353ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
Steven Rostedt2af15d62009-05-28 13:37:24 -04003354{
3355 char *func;
3356
3357 while (buf) {
3358 func = strsep(&buf, ",");
Steven Rostedtf45948e2011-05-02 12:29:25 -04003359 ftrace_set_regex(ops, func, strlen(func), 0, enable);
Steven Rostedt2af15d62009-05-28 13:37:24 -04003360 }
3361}
3362
/*
 * Called from ftrace_init(): apply any filters that were collected from
 * the kernel command line before ftrace was up.
 */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
3374
/*
 * Release handler shared by set_ftrace_filter and set_ftrace_notrace.
 *
 * Flushes any partially parsed expression left in the trace parser,
 * then (for writable opens) moves the iterator's working hash into the
 * ops' live hash and re-patches call sites if needed.
 */
int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		/* read opens stash the iterator in the seq_file */
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* Apply a trailing expression that was not newline-terminated */
	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->filter_hash;
		else
			orig_hash = &iter->ops->notrace_hash;

		/* Swap the working hash in under ftrace_lock */
		mutex_lock(&ftrace_lock);
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
		    && ftrace_enabled)
			ftrace_run_update_code(FTRACE_UPDATE_CALLS);

		mutex_unlock(&ftrace_lock);
	}
	free_ftrace_hash(iter->hash);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
3423
/* "available_filter_functions": read-only list of traceable functions */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
3430
/* "enabled_functions": read-only list of currently patched functions */
static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
3437
/* "set_ftrace_filter": read/write access to the filter hash */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
3445
/* "set_ftrace_notrace": read/write access to the notrace hash */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
3453
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003454#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3455
/* Serializes access to the graph filter table below */
static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;			/* entries used in ftrace_graph_funcs */
int ftrace_graph_filter_enabled;	/* non-zero once a filter has been set */
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3461
3462static void *
Li Zefan85951842009-06-24 09:54:00 +08003463__g_next(struct seq_file *m, loff_t *pos)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003464{
Li Zefan85951842009-06-24 09:54:00 +08003465 if (*pos >= ftrace_graph_count)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003466 return NULL;
Li Zefana4ec5e02009-09-18 14:06:28 +08003467 return &ftrace_graph_funcs[*pos];
Li Zefan85951842009-06-24 09:54:00 +08003468}
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003469
Li Zefan85951842009-06-24 09:54:00 +08003470static void *
3471g_next(struct seq_file *m, void *v, loff_t *pos)
3472{
3473 (*pos)++;
3474 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003475}
3476
3477static void *g_start(struct seq_file *m, loff_t *pos)
3478{
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003479 mutex_lock(&graph_lock);
3480
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003481 /* Nothing, tell g_show to print all functions are enabled */
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003482 if (!ftrace_graph_filter_enabled && !*pos)
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003483 return (void *)1;
3484
Li Zefan85951842009-06-24 09:54:00 +08003485 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003486}
3487
/* seq_file ->stop: drop the lock taken in g_start() */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
3492
/* seq_file ->show: print one graph filter entry (or the sentinel) */
static int g_show(struct seq_file *m, void *v)
{
        unsigned long *entry = v;

        if (!entry)
                return 0;

        /* g_start() hands us (void *)1 when no filter is set at all */
        if (entry == (unsigned long *)1)
                seq_printf(m, "#### all functions enabled ####\n");
        else
                seq_printf(m, "%ps\n", (void *)*entry);

        return 0;
}
3509
/* seq_file iteration over the graph filter table */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
3516
/*
 * Open handler for "set_graph_function". O_TRUNC on a writable open
 * clears the whole filter; read opens attach the seq_file iterator.
 */
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		/* Truncating write: start from an empty filter */
		ftrace_graph_filter_enabled = 0;
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_graph_seq_ops);

	return ret;
}
3539
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003540static int
Li Zefan87827112009-07-23 11:29:11 +08003541ftrace_graph_release(struct inode *inode, struct file *file)
3542{
3543 if (file->f_mode & FMODE_READ)
3544 seq_release(inode, file);
3545 return 0;
3546}
3547
/*
 * Add (or, with a '!' prefix, remove) every function matching @buffer
 * to/from the graph filter @array, updating *@idx in place.
 *
 * Returns 0 on success, -EBUSY if the array is already full for an add,
 * -ENODEV if ftrace is disabled, -EINVAL if nothing matched.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				/* add: any match at all counts as success */
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
						goto out;
				}
			} else {
				/* remove: swap the last entry into slot i */
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	ftrace_graph_filter_enabled = 1;
	return 0;
}
3611
/*
 * Write handler for "set_graph_function": parse one expression from
 * user space and feed it to ftrace_set_func() under graph_lock.
 */
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      parser.buffer);
		if (ret)
			goto out_free;
	}

	/* success: report the number of bytes consumed */
	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}
3650
/* "set_graph_function": read/write access to the graph filter */
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.release = ftrace_graph_release,
	.llseek = seq_lseek,
};
3658#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3659
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003660static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02003661{
Steven Rostedt5072c592008-05-12 21:20:43 +02003662
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003663 trace_create_file("available_filter_functions", 0444,
3664 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02003665
Steven Rostedt647bcd02011-05-03 14:39:21 -04003666 trace_create_file("enabled_functions", 0444,
3667 d_tracer, NULL, &ftrace_enabled_fops);
3668
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003669 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3670 NULL, &ftrace_filter_fops);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003671
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003672 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003673 NULL, &ftrace_notrace_fops);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04003674
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003675#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003676 trace_create_file("set_graph_function", 0444, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003677 NULL,
3678 &ftrace_graph_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003679#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3680
Steven Rostedt5072c592008-05-12 21:20:43 +02003681 return 0;
3682}
3683
Steven Rostedt68950612011-12-16 17:06:45 -05003684static void ftrace_swap_recs(void *a, void *b, int size)
3685{
3686 struct dyn_ftrace *reca = a;
3687 struct dyn_ftrace *recb = b;
3688 struct dyn_ftrace t;
3689
3690 t = *reca;
3691 *reca = *recb;
3692 *recb = t;
3693}
3694
/*
 * Record the mcount call sites in [start, end) for the core kernel
 * (@mod == NULL) or a module, link the freshly allocated record pages
 * into the global list, sort them, and patch the call sites to nops.
 *
 * Returns 0 on success (including an empty range), negative errno on
 * failure. Called with @mod's code not yet executing for module loads.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *pg;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	pg = ftrace_allocate_pages(count);
	if (!pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = pg;
	} else {
		/* Core must have been initialized before any module load */
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = pg;
		ftrace_pages = pg;
	}

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		if (!ftrace_record_ip(addr))
			break;
	}

	/* These new locations need to be initialized */
	ftrace_new_pgs = pg;

	/* Make each individual set of pages sorted by ips */
	for (; pg; pg = pg->next)
		sort(pg->records, pg->index, sizeof(struct dyn_ftrace),
		     ftrace_cmp_recs, ftrace_swap_recs);

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}
3782
Steven Rostedt93eb6772009-04-15 13:24:06 -04003783#ifdef CONFIG_MODULES
Steven Rostedt32082302011-12-16 14:42:37 -05003784
3785#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3786
/*
 * Remove and free all ftrace record pages belonging to @mod.
 *
 * Pages are whole-module granular (see ftrace_process_locs()), so
 * checking the first record of a page is enough to classify it.
 */
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *pg;
	int order;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
		if (within_module_core(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			/* Unlink, then free the records and the page struct */
			*last_pg = pg->next;
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);
}
3828
3829static void ftrace_init_module(struct module *mod,
3830 unsigned long *start, unsigned long *end)
Steven Rostedt90d595f2008-08-14 15:45:09 -04003831{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04003832 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04003833 return;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04003834 ftrace_process_locs(mod, start, end);
Steven Rostedt90d595f2008-08-14 15:45:09 -04003835}
3836
Steven Rostedt93eb6772009-04-15 13:24:06 -04003837static int ftrace_module_notify(struct notifier_block *self,
3838 unsigned long val, void *data)
3839{
3840 struct module *mod = data;
3841
3842 switch (val) {
3843 case MODULE_STATE_COMING:
3844 ftrace_init_module(mod, mod->ftrace_callsites,
3845 mod->ftrace_callsites +
3846 mod->num_ftrace_callsites);
3847 break;
3848 case MODULE_STATE_GOING:
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003849 ftrace_release_mod(mod);
Steven Rostedt93eb6772009-04-15 13:24:06 -04003850 break;
3851 }
3852
3853 return 0;
3854}
3855#else
/* !CONFIG_MODULES: nothing to track on module state changes */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
3861#endif /* CONFIG_MODULES */
3862
/* Registered in ftrace_init(); dispatches to ftrace_module_notify() */
struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};
3867
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003868extern unsigned long __start_mcount_loc[];
3869extern unsigned long __stop_mcount_loc[];
3870
/*
 * Boot-time initialization of dynamic ftrace: let the arch hook run,
 * allocate the record table, convert every __start_mcount_loc entry,
 * hook module load/unload, and apply command-line filters.
 * On any failure ftrace is disabled for the life of the system.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/*
	 * NOTE(review): this return value is overwritten by the
	 * register_module_notifier() call below without being checked —
	 * a failure here goes unreported. Verify this is intentional.
	 */
	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module notifier\n");

	/* Apply "ftrace_filter="/"ftrace_notrace=" collected at boot */
	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003909
Steven Rostedt3d083392008-05-12 21:20:42 +02003910#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01003911
/* !CONFIG_DYNAMIC_FTRACE: global ops with no hash filtering */
static struct ftrace_ops global_ops = {
	.func = ftrace_stub,
};
3915
/* Without dynamic ftrace there is nothing to patch: just flip the switch */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);
3922
/* !CONFIG_DYNAMIC_FTRACE stubs: no debugfs files, no code patching */
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)			\
	({						\
		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
		0;					\
	})
# define ftrace_shutdown(ops, command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
Steven Rostedtb8489142011-05-04 09:27:52 -04003934
3935static inline int
3936ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3937{
3938 return 1;
3939}
3940
Steven Rostedt3d083392008-05-12 21:20:42 +02003941#endif /* CONFIG_DYNAMIC_FTRACE */
3942
/*
 * Dispatcher for the "control" ops list: walk ftrace_control_list and
 * invoke every op that is not locally disabled (per-op state queried via
 * ftrace_function_local_disabled()) and whose filter matches @ip.
 * TRACE_CONTROL_BIT guards against recursive entry, e.g. an op callback
 * itself being traced.
 */
static void
ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op;

	/* Already dispatching in this context: refuse to recurse. */
	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	trace_recursion_set(TRACE_CONTROL_BIT);
	op = rcu_dereference_raw(ftrace_control_list);
	while (op != &ftrace_list_end) {
		if (!ftrace_function_local_disabled(op) &&
		    ftrace_ops_test(op, ip))
			op->func(ip, parent_ip);

		op = rcu_dereference_raw(op->next);
	};
	trace_recursion_clear(TRACE_CONTROL_BIT);
	preempt_enable_notrace();
}
3968
/* The ops that multiplexes all control ops through the walker above. */
static struct ftrace_ops control_ops = {
	.func = ftrace_ops_control_func,
};
3972
/*
 * Dispatcher used when more than one ftrace_ops is registered: walk
 * ftrace_ops_list and call every op whose filter matches @ip.
 * TRACE_INTERNAL_BIT prevents recursive dispatch.
 */
static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op;

	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
		return;

	trace_recursion_set(TRACE_INTERNAL_BIT);
	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	op = rcu_dereference_raw(ftrace_ops_list);
	while (op != &ftrace_list_end) {
		if (ftrace_ops_test(op, ip))
			op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next);
	};
	preempt_enable_notrace();
	/*
	 * NOTE(review): the recursion bit is cleared after
	 * preempt_enable_notrace() here, while ftrace_ops_control_func()
	 * clears its bit before enabling preemption — confirm the
	 * asymmetry is intentional.
	 */
	trace_recursion_clear(TRACE_INTERNAL_BIT);
}
3996
Steven Rostedte32d8952008-12-04 00:26:41 -05003997static void clear_ftrace_swapper(void)
3998{
3999 struct task_struct *p;
4000 int cpu;
4001
4002 get_online_cpus();
4003 for_each_online_cpu(cpu) {
4004 p = idle_task(cpu);
4005 clear_tsk_trace_trace(p);
4006 }
4007 put_online_cpus();
4008}
4009
4010static void set_ftrace_swapper(void)
4011{
4012 struct task_struct *p;
4013 int cpu;
4014
4015 get_online_cpus();
4016 for_each_online_cpu(cpu) {
4017 p = idle_task(cpu);
4018 set_tsk_trace_trace(p);
4019 }
4020 put_online_cpus();
4021}
4022
4023static void clear_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05004024{
4025 struct task_struct *p;
4026
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01004027 rcu_read_lock();
Steven Rostedte32d8952008-12-04 00:26:41 -05004028 do_each_pid_task(pid, PIDTYPE_PID, p) {
Steven Rostedt978f3a42008-12-04 00:26:40 -05004029 clear_tsk_trace_trace(p);
Steven Rostedte32d8952008-12-04 00:26:41 -05004030 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01004031 rcu_read_unlock();
4032
Steven Rostedte32d8952008-12-04 00:26:41 -05004033 put_pid(pid);
Steven Rostedt978f3a42008-12-04 00:26:40 -05004034}
4035
Steven Rostedte32d8952008-12-04 00:26:41 -05004036static void set_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05004037{
4038 struct task_struct *p;
4039
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01004040 rcu_read_lock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05004041 do_each_pid_task(pid, PIDTYPE_PID, p) {
4042 set_tsk_trace_trace(p);
4043 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01004044 rcu_read_unlock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05004045}
4046
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004047static void clear_ftrace_pid_task(struct pid *pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05004048{
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004049 if (pid == ftrace_swapper_pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05004050 clear_ftrace_swapper();
4051 else
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004052 clear_ftrace_pid(pid);
Steven Rostedte32d8952008-12-04 00:26:41 -05004053}
4054
4055static void set_ftrace_pid_task(struct pid *pid)
4056{
4057 if (pid == ftrace_swapper_pid)
4058 set_ftrace_swapper();
4059 else
4060 set_ftrace_pid(pid);
4061}
4062
/*
 * Add pid number @p to the set of traced pids (ftrace_pids).
 * @p == 0 selects the special swapper (idle) tasks.
 *
 * Returns 0 on success (including when the pid is already in the list),
 * -EINVAL if no such pid exists, -ENOMEM on allocation failure.
 * The struct pid reference taken here is dropped by ftrace_pid_reset()
 * via clear_ftrace_pid().
 */
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	/* Already being traced?  Nothing to do, but drop the extra ref. */
	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	/* Flag the tasks now and switch in the pid-filtering trace func. */
	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	/* The swapper sentinel is not a counted reference. */
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
4110
/*
 * Empty the pid filter: un-flag all tasks, drop every pid reference,
 * free the list entries, and restore the unfiltered trace function.
 */
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
}
4130
4131static void *fpid_start(struct seq_file *m, loff_t *pos)
4132{
4133 mutex_lock(&ftrace_lock);
4134
4135 if (list_empty(&ftrace_pids) && (!*pos))
4136 return (void *) 1;
4137
4138 return seq_list_start(&ftrace_pids, *pos);
4139}
4140
4141static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4142{
4143 if (v == (void *)1)
4144 return NULL;
4145
4146 return seq_list_next(v, &ftrace_pids, pos);
4147}
4148
/* seq_file ->stop: drop the lock taken in fpid_start(). */
static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
4153
4154static int fpid_show(struct seq_file *m, void *v)
4155{
4156 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4157
4158 if (v == (void *)1) {
4159 seq_printf(m, "no pid\n");
4160 return 0;
4161 }
4162
4163 if (fpid->pid == ftrace_swapper_pid)
4164 seq_printf(m, "swapper tasks\n");
4165 else
4166 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4167
4168 return 0;
4169}
4170
/* seq_file iterator for reading the set_ftrace_pid file. */
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
4177
4178static int
4179ftrace_pid_open(struct inode *inode, struct file *file)
4180{
4181 int ret = 0;
4182
4183 if ((file->f_mode & FMODE_WRITE) &&
4184 (file->f_flags & O_TRUNC))
4185 ftrace_pid_reset();
4186
4187 if (file->f_mode & FMODE_READ)
4188 ret = seq_open(file, &ftrace_pid_sops);
4189
4190 return ret;
4191}
4192
/*
 * Write handler for set_ftrace_pid: parse a decimal pid from user space
 * and add it to the filter via ftrace_pid_add().
 *
 * Returns the number of bytes consumed, a negative errno on failure, or
 * 1 for a whitespace-only write (the "clear quietly" case below — note
 * it deliberately returns 1, not cnt).
 */
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64], *tmp;
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
		return 1;

	ret = strict_strtol(tmp, 10, &val);
	if (ret < 0)
		return ret;

	ret = ftrace_pid_add(val);

	return ret ? ret : cnt;
}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004225
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004226static int
4227ftrace_pid_release(struct inode *inode, struct file *file)
4228{
4229 if (file->f_mode & FMODE_READ)
4230 seq_release(inode, file);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004231
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004232 return 0;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004233}
4234
/* File operations for the debugfs set_ftrace_pid file. */
static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ftrace_pid_release,
};
4242
/*
 * Create the ftrace debugfs files: the dynamic-ftrace control files,
 * set_ftrace_pid, and the function-profiler files.  Returns 0 even if
 * the tracing dentry is unavailable (debugfs is best-effort).
 */
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);
4261
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code.  It permanently disables
 * ftrace (sets ftrace_disabled), turns tracing off, and resets the
 * trace function — but in a not so nice way: no locking, no proper
 * shutdown of registered ops, so it is safe from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
4275
/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns 1 if ftrace_disabled is set, e.g. after ftrace_kill() or a
 * failed ftrace_init().
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
4283
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 *
 * Returns 0 on success, negative on failure, -1 if ftrace is disabled.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	ret = __register_ftrace_function(ops);
	if (!ret)
		ret = ftrace_startup(ops, 0);

 out_unlock:
	mutex_unlock(&ftrace_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
Steven Rostedt3d083392008-05-12 21:20:42 +02004314
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 * On success the corresponding ftrace_shutdown() is performed.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	if (!ret)
		ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004334
/*
 * sysctl handler for /proc/sys/kernel/ftrace_enabled.  On a state
 * change it switches the live trace function between ftrace_stub
 * (disabled), a single op's func, or the list dispatcher.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, error, or no effective change. */
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end) {
			/* Single op: call it directly; else use the walker. */
			if (ftrace_ops_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_ops_list->func;
			else
				ftrace_trace_function = ftrace_ops_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
Ingo Molnarf17845e2008-10-24 12:47:10 +02004377
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Non-zero while a graph tracer is registered (only one at a time). */
static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

/* Default entry hook: trace nothing. */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004392
4393/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4394static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4395{
4396 int i;
4397 int ret = 0;
4398 unsigned long flags;
4399 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4400 struct task_struct *g, *t;
4401
4402 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4403 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4404 * sizeof(struct ftrace_ret_stack),
4405 GFP_KERNEL);
4406 if (!ret_stack_list[i]) {
4407 start = 0;
4408 end = i;
4409 ret = -ENOMEM;
4410 goto free;
4411 }
4412 }
4413
4414 read_lock_irqsave(&tasklist_lock, flags);
4415 do_each_thread(g, t) {
4416 if (start == end) {
4417 ret = -EAGAIN;
4418 goto unlock;
4419 }
4420
4421 if (t->ret_stack == NULL) {
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01004422 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004423 atomic_set(&t->trace_overrun, 0);
Steven Rostedt26c01622009-06-02 14:01:19 -04004424 t->curr_ret_stack = -1;
4425 /* Make sure the tasks see the -1 first: */
4426 smp_wmb();
4427 t->ret_stack = ret_stack_list[start++];
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004428 }
4429 } while_each_thread(g, t);
4430
4431unlock:
4432 read_unlock_irqrestore(&tasklist_lock, flags);
4433free:
4434 for (i = start; i < end; i++)
4435 kfree(ret_stack_list[i]);
4436 return ret;
4437}
4438
/*
 * sched_switch tracepoint probe: compensate the pending return-stack
 * entries of the task being switched in for the time it spent asleep,
 * so graph durations exclude sleep time (unless TRACE_ITER_SLEEP_TIME).
 */
static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
4470
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	/* Scratch array reused across alloc_retstack_tasklist() retries. */
	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	/* -EAGAIN means the pre-allocated batch ran out: retry. */
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
4504
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004505/*
4506 * Hibernation protection.
4507 * The state of the current task is too much unstable during
4508 * suspend/restore to disk. We want to protect against that.
4509 */
4510static int
4511ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4512 void *unused)
4513{
4514 switch (state) {
4515 case PM_HIBERNATION_PREPARE:
4516 pause_graph_tracing();
4517 break;
4518
4519 case PM_POST_HIBERNATION:
4520 unpause_graph_tracing();
4521 break;
4522 }
4523 return NOTIFY_DONE;
4524}
4525
/*
 * Register the function-graph entry/return hooks.  Allocates return
 * stacks for all tasks, hooks sched_switch, arms the hibernation
 * notifier, and starts ftrace with FTRACE_START_FUNC_RET.
 * Returns -EBUSY if a graph tracer is already registered.
 */
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	/* Mark active before allocating so new tasks get stacks too. */
	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
4558
/*
 * Undo register_ftrace_graph(): restore the stub hooks, shut down the
 * function-return machinery, and remove the PM and sched_switch hooks.
 * A no-op if no graph tracer is active.
 */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004576
/* Cached ret_stack for each CPU's idle task; never freed (see below). */
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

/*
 * Attach @ret_stack to @t after resetting the task's graph-tracing
 * state.  Caller must have set t->curr_ret_stack already.
 */
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
4589
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		/* Reuse the per-cpu cached stack; allocate once per CPU. */
		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
4619
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		/* Allocation failure silently leaves the task untraced. */
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
4638
/* Free a task's return stack on exit. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
Steven Rostedt14a866c2008-12-02 23:50:02 -05004649
/* Emergency stop for the graph tracer; just delegates to ftrace_stop(). */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004654#endif