/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func		= ftrace_stub,
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_global_list_func(unsigned long ip,
				    unsigned long parent_ip)
{
	struct ftrace_ops *op;

	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
		return;

	trace_recursion_set(TRACE_GLOBAL_BIT);
	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
	while (op != &ftrace_list_end) {
		op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next); /*see above*/
	};
	trace_recursion_clear(TRACE_GLOBAL_BIT);
}

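/*
 * PID-filtered entry point: only hand the call to ftrace_pid_function
 * when the current task has been flagged for tracing.
 */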
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before every CPU stops calling
 * into the previously registered callback.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end)
		func = ftrace_global_list->func;
	else
		func = ftrace_global_list_func;

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * not dynamic, then have the mcount trampoline call
	 * the function directly
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
		func = ftrace_ops_list->func;
	else
		func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
	ftrace_trace_function = ftrace_test_stop_func;
#endif
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

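/*
 * Insert an ops into the global or generic callback list and, if tracing
 * is enabled, update the function the mcount trampoline calls.
 */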
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ftrace_disabled)
		return -ENODEV;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		int first = ftrace_global_list == &ftrace_list_end;
		add_ftrace_ops(&ftrace_global_list, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
		if (first)
			add_ftrace_ops(&ftrace_ops_list, &global_ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (ftrace_disabled)
		return -ENODEV;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_ops(&ftrace_global_list, ops);
		if (!ret && ftrace_global_list == &ftrace_list_end)
			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		synchronize_sched();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

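/*
 * tracer_stat iterators: walk the per-cpu profile pages record by record,
 * moving on to the next page once the current one has been consumed.
 */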
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

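/*
 * Emit one row of the profile: symbol name and hit count, plus total time,
 * average and sample variance when the graph tracer is used for timing.
 */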
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	free_page((unsigned long)stat->pages);
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

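/*
 * Profiler entry hook: with interrupts disabled, find (or allocate) the
 * record for this ip on the local CPU and bump its hit counter.
 */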
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

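/*
 * Graph return hook: compute the time spent in the function (optionally
 * excluding time spent in children) and accumulate it, and its square,
 * into the profile record.
 */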
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops *ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};
struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.notrace_hash		= EMPTY_HASH,
	.filter_hash		= EMPTY_HASH,
};

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

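/*
 * Look up an instruction pointer in an ftrace_hash; returns the matching
 * entry or NULL. An empty hash never matches.
 */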
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;

	if (!hash->count)
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tp, *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_node *tp;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (!hash || !hash->count)
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

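/*
 * Move all entries of @src into a freshly sized hash and publish it as
 * *@dst under RCU; the old destination hash is freed after a grace period.
 */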
static int
ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tp, *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	unsigned long key;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		free_ftrace_hash_rcu(*dst);
		rcu_assign_pointer(*dst, EMPTY_HASH);
		return 0;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
			if (bits > 0)
				key = hash_long(entry->ip, bits);
			else
				key = 0;
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
	free_ftrace_hash_rcu(old_hash);

	return 0;
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

	filter_hash = rcu_dereference_raw(ops->filter_hash);
	notrace_hash = rcu_dereference_raw(ops->notrace_hash);

	if ((!filter_hash || !filter_hash->count ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (!notrace_hash || !notrace_hash->count ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

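/*
 * Walk all dyn_ftrace records and adjust their ref counts to reflect the
 * given ops hash being added (inc) or removed; a record with a non-zero
 * count is one that some registered ops wants traced.
 */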
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (!hash || !hash->count)
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (hash && !hash->count)
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * For the filter hash: match records that are in
			 * this hash but not in the notrace hash.
			 * For the notrace hash: match records that are in
			 * this hash and are also either in the filter hash
			 * or covered by an empty (match-all) filter hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || !other_hash->count))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return;
		} else {
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return;
			rec->flags--;
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

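/*
 * Report why patching a call site failed. Each case goes through
 * FTRACE_WARN_ON_ONCE(), which also shuts ftrace down via ftrace_kill().
 */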
Steven Rostedt31e88902008-11-14 16:21:19 -08001457static void ftrace_bug(int failed, unsigned long ip)
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001458{
1459 switch (failed) {
1460 case -EFAULT:
1461 FTRACE_WARN_ON_ONCE(1);
1462 pr_info("ftrace faulted on modifying ");
1463 print_ip_sym(ip);
1464 break;
1465 case -EINVAL:
1466 FTRACE_WARN_ON_ONCE(1);
1467 pr_info("ftrace failed to modify ");
1468 print_ip_sym(ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001469 print_ip_ins(" actual: ", (unsigned char *)ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001470 printk(KERN_CONT "\n");
1471 break;
1472 case -EPERM:
1473 FTRACE_WARN_ON_ONCE(1);
1474 pr_info("ftrace faulted on writing ");
1475 print_ip_sym(ip);
1476 break;
1477 default:
1478 FTRACE_WARN_ON_ONCE(1);
1479 pr_info("ftrace faulted on unknown error ");
1480 print_ip_sym(ip);
1481 }
1482}
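
/*
 * The errors reported above come from the arch ftrace_make_nop()/
 * ftrace_make_call() helpers: -EFAULT generally means the instruction
 * could not be read, -EINVAL that it did not contain the expected bytes
 * (the actual bytes are dumped), and -EPERM that the replacement could
 * not be written. Callers follow roughly the pattern used by
 * ftrace_code_disable() below:
 *
 *	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
 *	if (ret) {
 *		ftrace_bug(ret, rec->ip);
 *		return 0;
 *	}
 */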
1483
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001484
Masami Hiramatsu2cfa1972010-02-02 16:49:11 -05001485/* Return 1 if the address range is reserved for ftrace */
1486int ftrace_text_reserved(void *start, void *end)
1487{
1488 struct dyn_ftrace *rec;
1489 struct ftrace_page *pg;
1490
1491 do_for_each_ftrace_rec(pg, rec) {
1492 if (rec->ip <= (unsigned long)end &&
1493 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1494 return 1;
1495 } while_for_each_ftrace_rec();
1496 return 0;
1497}
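
/*
 * For illustration, a text-patching user (kprobes is one such caller)
 * would typically refuse an address range that overlaps an mcount site:
 *
 *	if (ftrace_text_reserved(addr, addr + size - 1))
 *		return -EBUSY;
 *
 * Note that 'end' is treated as the last byte of the range (inclusive),
 * so callers pass the final byte rather than one past the end; the
 * -EBUSY above is only an example return value.
 */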
1498
1499
Abhishek Sagar492a7ea2008-05-25 00:10:04 +05301500static int
Steven Rostedt31e88902008-11-14 16:21:19 -08001501__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001502{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001503 unsigned long ftrace_addr;
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001504 unsigned long flag = 0UL;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001505
Shaohua Lif0001202009-01-09 11:29:42 +08001506 ftrace_addr = (unsigned long)FTRACE_ADDR;
Steven Rostedt5072c592008-05-12 21:20:43 +02001507
Steven Rostedt982c3502008-11-15 16:31:41 -05001508 /*
Steven Rostedted926f92011-05-03 13:25:24 -04001509 * If we are enabling tracing:
Steven Rostedt982c3502008-11-15 16:31:41 -05001510 *
Steven Rostedted926f92011-05-03 13:25:24 -04001511 * If the record has a ref count, then we need to enable it
1512 * because someone is using it.
Steven Rostedt982c3502008-11-15 16:31:41 -05001513 *
Steven Rostedted926f92011-05-03 13:25:24 -04001514	 * Otherwise we make sure it's disabled.
1515 *
1516 * If we are disabling tracing, then disable all records that
1517 * are enabled.
Steven Rostedt982c3502008-11-15 16:31:41 -05001518 */
Steven Rostedted926f92011-05-03 13:25:24 -04001519 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1520 flag = FTRACE_FL_ENABLED;
Steven Rostedt5072c592008-05-12 21:20:43 +02001521
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001522 /* If the state of this record hasn't changed, then do nothing */
1523 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1524 return 0;
1525
1526 if (flag) {
1527 rec->flags |= FTRACE_FL_ENABLED;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001528 return ftrace_make_call(rec, ftrace_addr);
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001529 }
1530
1531 rec->flags &= ~FTRACE_FL_ENABLED;
1532 return ftrace_make_nop(NULL, rec, ftrace_addr);
Steven Rostedt5072c592008-05-12 21:20:43 +02001533}
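
/*
 * The same decision in table form (illustrative; "refs" means the
 * record's ref-count bits, i.e. rec->flags & ~FTRACE_FL_MASK):
 *
 *	enable	refs	ENABLED bit	action
 *	  1	 >0	    0		ftrace_make_call()
 *	  1	 >0	    1		nothing
 *	  1	  0	    1		ftrace_make_nop()
 *	  1	  0	    0		nothing
 *	  0	  any	    1		ftrace_make_nop()
 *	  0	  any	    0		nothing
 */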
1534
1535static void ftrace_replace_code(int enable)
1536{
Steven Rostedt37ad5082008-05-12 21:20:48 +02001537 struct dyn_ftrace *rec;
1538 struct ftrace_page *pg;
Steven Rostedt6a24a242009-02-17 11:20:26 -05001539 int failed;
Steven Rostedt37ad5082008-05-12 21:20:48 +02001540
Steven Rostedt45a4a232011-04-21 23:16:46 -04001541 if (unlikely(ftrace_disabled))
1542 return;
1543
Steven Rostedt265c8312009-02-13 12:43:56 -05001544 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtd2c8c3e2011-04-25 14:32:42 -04001545 /* Skip over free records */
1546 if (rec->flags & FTRACE_FL_FREE)
Steven Rostedt265c8312009-02-13 12:43:56 -05001547 continue;
Steven Rostedt5072c592008-05-12 21:20:43 +02001548
Steven Rostedt265c8312009-02-13 12:43:56 -05001549 failed = __ftrace_replace_code(rec, enable);
Zhaoleifa9d13c2009-03-13 17:16:34 +08001550 if (failed) {
Steven Rostedt3279ba32009-10-07 16:57:56 -04001551 ftrace_bug(failed, rec->ip);
1552 /* Stop processing */
1553 return;
Steven Rostedt265c8312009-02-13 12:43:56 -05001554 }
1555 } while_for_each_ftrace_rec();
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001556}
1557
Ingo Molnare309b412008-05-12 21:20:51 +02001558static int
Steven Rostedt31e88902008-11-14 16:21:19 -08001559ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001560{
1561 unsigned long ip;
Steven Rostedt593eb8a2008-10-23 09:32:59 -04001562 int ret;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001563
1564 ip = rec->ip;
1565
Steven Rostedt45a4a232011-04-21 23:16:46 -04001566 if (unlikely(ftrace_disabled))
1567 return 0;
1568
Shaohua Li25aac9d2009-01-09 11:29:40 +08001569 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
Steven Rostedt593eb8a2008-10-23 09:32:59 -04001570 if (ret) {
Steven Rostedt31e88902008-11-14 16:21:19 -08001571 ftrace_bug(ret, ip);
Abhishek Sagar492a7ea2008-05-25 00:10:04 +05301572 return 0;
Steven Rostedt37ad5082008-05-12 21:20:48 +02001573 }
Abhishek Sagar492a7ea2008-05-25 00:10:04 +05301574 return 1;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001575}
1576
Steven Rostedt000ab692009-02-17 13:35:06 -05001577/*
1578 * archs can override this function if they must do something
1579 * before the modifying code is performed.
1580 */
1581int __weak ftrace_arch_code_modify_prepare(void)
1582{
1583 return 0;
1584}
1585
1586/*
1587 * archs can override this function if they must do something
1588 * after the modifying code is performed.
1589 */
1590int __weak ftrace_arch_code_modify_post_process(void)
1591{
1592 return 0;
1593}
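
/*
 * A strong definition typically brackets the text modification. On x86,
 * for instance, kernel text is made writable around the update; a rough
 * sketch of such an override (arch details elided) looks like:
 *
 *	int ftrace_arch_code_modify_prepare(void)
 *	{
 *		set_kernel_text_rw();
 *		return 0;
 *	}
 *
 *	int ftrace_arch_code_modify_post_process(void)
 *	{
 *		set_kernel_text_ro();
 *		return 0;
 *	}
 */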
1594
Ingo Molnare309b412008-05-12 21:20:51 +02001595static int __ftrace_modify_code(void *data)
Steven Rostedt3d083392008-05-12 21:20:42 +02001596{
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001597 int *command = data;
1598
Steven Rostedta3583242008-11-11 15:01:42 -05001599 if (*command & FTRACE_ENABLE_CALLS)
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001600 ftrace_replace_code(1);
Steven Rostedta3583242008-11-11 15:01:42 -05001601 else if (*command & FTRACE_DISABLE_CALLS)
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001602 ftrace_replace_code(0);
1603
1604 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1605 ftrace_update_ftrace_func(ftrace_trace_function);
1606
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05001607 if (*command & FTRACE_START_FUNC_RET)
1608 ftrace_enable_ftrace_graph_caller();
1609 else if (*command & FTRACE_STOP_FUNC_RET)
1610 ftrace_disable_ftrace_graph_caller();
1611
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001612 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02001613}
1614
Ingo Molnare309b412008-05-12 21:20:51 +02001615static void ftrace_run_update_code(int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001616{
Steven Rostedt000ab692009-02-17 13:35:06 -05001617 int ret;
1618
1619 ret = ftrace_arch_code_modify_prepare();
1620 FTRACE_WARN_ON(ret);
1621 if (ret)
1622 return;
1623
Rusty Russell784e2d72008-07-28 12:16:31 -05001624 stop_machine(__ftrace_modify_code, &command, NULL);
Steven Rostedt000ab692009-02-17 13:35:06 -05001625
1626 ret = ftrace_arch_code_modify_post_process();
1627 FTRACE_WARN_ON(ret);
Steven Rostedt3d083392008-05-12 21:20:42 +02001628}
1629
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001630static ftrace_func_t saved_ftrace_func;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001631static int ftrace_start_up;
Steven Rostedtb8489142011-05-04 09:27:52 -04001632static int global_start_up;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001633
1634static void ftrace_startup_enable(int command)
1635{
1636 if (saved_ftrace_func != ftrace_trace_function) {
1637 saved_ftrace_func = ftrace_trace_function;
1638 command |= FTRACE_UPDATE_TRACE_FUNC;
1639 }
1640
1641 if (!command || !ftrace_enabled)
1642 return;
1643
1644 ftrace_run_update_code(command);
1645}
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001646
Steven Rostedta1cd6172011-05-23 15:24:25 -04001647static int ftrace_startup(struct ftrace_ops *ops, int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001648{
Steven Rostedtb8489142011-05-04 09:27:52 -04001649 bool hash_enable = true;
1650
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001651 if (unlikely(ftrace_disabled))
Steven Rostedta1cd6172011-05-23 15:24:25 -04001652 return -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001653
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001654 ftrace_start_up++;
Steven Rostedt982c3502008-11-15 16:31:41 -05001655 command |= FTRACE_ENABLE_CALLS;
Steven Rostedt3d083392008-05-12 21:20:42 +02001656
Steven Rostedtb8489142011-05-04 09:27:52 -04001657 /* ops marked global share the filter hashes */
1658 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1659 ops = &global_ops;
1660 /* Don't update hash if global is already set */
1661 if (global_start_up)
1662 hash_enable = false;
1663 global_start_up++;
1664 }
1665
Steven Rostedted926f92011-05-03 13:25:24 -04001666 ops->flags |= FTRACE_OPS_FL_ENABLED;
Steven Rostedtb8489142011-05-04 09:27:52 -04001667 if (hash_enable)
Steven Rostedted926f92011-05-03 13:25:24 -04001668 ftrace_hash_rec_enable(ops, 1);
1669
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001670 ftrace_startup_enable(command);
Steven Rostedta1cd6172011-05-23 15:24:25 -04001671
1672 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02001673}
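
/*
 * ftrace_startup()/ftrace_shutdown() are only reached through the
 * register/unregister paths. A minimal caller, in sketch form (my_func
 * and my_ops are illustrative names, error handling omitted):
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * register_ftrace_function() lands here via ftrace_startup(&my_ops, 0),
 * which patches the mcount sites to call into the tracer;
 * unregister_ftrace_function() undoes that via ftrace_shutdown().
 */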
1674
Steven Rostedtbd69c302011-05-03 21:55:54 -04001675static void ftrace_shutdown(struct ftrace_ops *ops, int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001676{
Steven Rostedtb8489142011-05-04 09:27:52 -04001677 bool hash_disable = true;
1678
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001679 if (unlikely(ftrace_disabled))
1680 return;
1681
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001682 ftrace_start_up--;
Frederic Weisbecker9ea1a152009-06-20 06:52:21 +02001683 /*
1684	 * Just warn in case of an imbalance; no need to kill ftrace, it's not
1685	 * critical, but the ftrace_call callers may never be nopped again after
1686 * further ftrace uses.
1687 */
1688 WARN_ON_ONCE(ftrace_start_up < 0);
1689
Steven Rostedtb8489142011-05-04 09:27:52 -04001690 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1691 ops = &global_ops;
1692 global_start_up--;
1693 WARN_ON_ONCE(global_start_up < 0);
1694 /* Don't update hash if global still has users */
1695 if (global_start_up) {
1696 WARN_ON_ONCE(!ftrace_start_up);
1697 hash_disable = false;
1698 }
1699 }
1700
1701 if (hash_disable)
Steven Rostedted926f92011-05-03 13:25:24 -04001702 ftrace_hash_rec_disable(ops, 1);
1703
Steven Rostedtb8489142011-05-04 09:27:52 -04001704 if (ops != &global_ops || !global_start_up)
Steven Rostedted926f92011-05-03 13:25:24 -04001705 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
Steven Rostedtb8489142011-05-04 09:27:52 -04001706
1707 if (!ftrace_start_up)
1708 command |= FTRACE_DISABLE_CALLS;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001709
1710 if (saved_ftrace_func != ftrace_trace_function) {
1711 saved_ftrace_func = ftrace_trace_function;
1712 command |= FTRACE_UPDATE_TRACE_FUNC;
1713 }
1714
1715 if (!command || !ftrace_enabled)
Steven Rostedte6ea44e2009-02-14 01:42:44 -05001716 return;
Steven Rostedt3d083392008-05-12 21:20:42 +02001717
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001718 ftrace_run_update_code(command);
Steven Rostedt3d083392008-05-12 21:20:42 +02001719}
1720
Ingo Molnare309b412008-05-12 21:20:51 +02001721static void ftrace_startup_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001722{
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001723 if (unlikely(ftrace_disabled))
1724 return;
1725
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001726 /* Force update next time */
1727 saved_ftrace_func = NULL;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001728	/* ftrace_start_up is non-zero if we want ftrace running */
1729 if (ftrace_start_up)
Steven Rostedt79e406d2010-09-14 22:19:46 -04001730 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001731}
1732
Ingo Molnare309b412008-05-12 21:20:51 +02001733static void ftrace_shutdown_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001734{
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001735 if (unlikely(ftrace_disabled))
1736 return;
1737
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001738	/* ftrace_start_up is non-zero if ftrace is running */
1739 if (ftrace_start_up)
Steven Rostedt79e406d2010-09-14 22:19:46 -04001740 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001741}
1742
Steven Rostedt3d083392008-05-12 21:20:42 +02001743static cycle_t ftrace_update_time;
1744static unsigned long ftrace_update_cnt;
1745unsigned long ftrace_update_tot_cnt;
1746
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001747static int ops_traces_mod(struct ftrace_ops *ops)
1748{
1749 struct ftrace_hash *hash;
1750
1751 hash = ops->filter_hash;
1752 return !!(!hash || !hash->count);
1753}
1754
Steven Rostedt31e88902008-11-14 16:21:19 -08001755static int ftrace_update_code(struct module *mod)
Steven Rostedt3d083392008-05-12 21:20:42 +02001756{
Lai Jiangshane94142a2009-03-13 17:51:27 +08001757 struct dyn_ftrace *p;
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05301758 cycle_t start, stop;
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001759 unsigned long ref = 0;
1760
1761 /*
1762 * When adding a module, we need to check if tracers are
1763 * currently enabled and if they are set to trace all functions.
1764 * If they are, we need to enable the module functions as well
1765 * as update the reference counts for those function records.
1766 */
1767 if (mod) {
1768 struct ftrace_ops *ops;
1769
1770 for (ops = ftrace_ops_list;
1771 ops != &ftrace_list_end; ops = ops->next) {
1772 if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1773 ops_traces_mod(ops))
1774 ref++;
1775 }
1776 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001777
Ingo Molnar750ed1a2008-05-12 21:20:46 +02001778 start = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02001779 ftrace_update_cnt = 0;
1780
Lai Jiangshane94142a2009-03-13 17:51:27 +08001781 while (ftrace_new_addrs) {
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05301782
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001783 /* If something went wrong, bail without enabling anything */
1784 if (unlikely(ftrace_disabled))
1785 return -1;
Steven Rostedt3d083392008-05-12 21:20:42 +02001786
Lai Jiangshane94142a2009-03-13 17:51:27 +08001787 p = ftrace_new_addrs;
Lai Jiangshanee000b72009-03-24 13:38:06 +08001788 ftrace_new_addrs = p->newlist;
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001789 p->flags = ref;
Abhishek Sagar0eb96702008-06-01 21:47:30 +05301790
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001791 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001792 * Do the initial record conversion from mcount jump
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001793 * to the NOP instructions.
1794 */
1795 if (!ftrace_code_disable(mod, p)) {
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001796 ftrace_free_rec(p);
Steven Rostedtd2c8c3e2011-04-25 14:32:42 -04001797 /* Game over */
1798 break;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001799 }
1800
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001801 ftrace_update_cnt++;
1802
1803 /*
1804 * If the tracing is enabled, go ahead and enable the record.
1805 *
1806	 * The reason not to enable the record immediately is the
1807 * inherent check of ftrace_make_nop/ftrace_make_call for
1808 * correct previous instructions. Making first the NOP
1809 * conversion puts the module to the correct state, thus
1810 * passing the ftrace_make_call check.
1811 */
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001812 if (ftrace_start_up && ref) {
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001813 int failed = __ftrace_replace_code(p, 1);
1814 if (failed) {
1815 ftrace_bug(failed, p->ip);
1816 ftrace_free_rec(p);
1817 }
1818 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001819 }
1820
Ingo Molnar750ed1a2008-05-12 21:20:46 +02001821 stop = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02001822 ftrace_update_time = stop - start;
1823 ftrace_update_tot_cnt += ftrace_update_cnt;
1824
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001825 return 0;
1826}
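
/*
 * Summary of the loop above: each record queued on ftrace_new_addrs is
 * first converted to a NOP (ftrace_code_disable()), its flags are seeded
 * with 'ref' (one count per enabled ftrace_ops that traces all
 * functions), and it is patched to call ftrace right away only when a
 * tracer is already running (ftrace_start_up) and ref is non-zero. So a
 * module loaded while the function tracer is active has its functions
 * traced immediately, while one loaded with tracing off is left as plain
 * NOPs until a tracer starts.
 */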
1827
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001828static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001829{
1830 struct ftrace_page *pg;
1831 int cnt;
1832 int i;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001833
1834 /* allocate a few pages */
1835 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1836 if (!ftrace_pages_start)
1837 return -1;
1838
1839 /*
1840 * Allocate a few more pages.
1841 *
1842 * TODO: have some parser search vmlinux before
1843 * final linking to find all calls to ftrace.
1844 * Then we can:
1845 * a) know how many pages to allocate.
1846 * and/or
1847 * b) set up the table then.
1848 *
1849 * The dynamic code is still necessary for
1850 * modules.
1851 */
1852
1853 pg = ftrace_pages = ftrace_pages_start;
1854
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001855 cnt = num_to_init / ENTRIES_PER_PAGE;
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001856 pr_info("ftrace: allocating %ld entries in %d pages\n",
walimis5821e1b2008-11-15 15:19:06 +08001857 num_to_init, cnt + 1);
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001858
1859 for (i = 0; i < cnt; i++) {
1860 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1861
1862 /* If we fail, we'll try later anyway */
1863 if (!pg->next)
1864 break;
1865
1866 pg = pg->next;
1867 }
1868
1869 return 0;
1870}
1871
Steven Rostedt5072c592008-05-12 21:20:43 +02001872enum {
1873 FTRACE_ITER_FILTER = (1 << 0),
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02001874 FTRACE_ITER_NOTRACE = (1 << 1),
Steven Rostedt3499e462011-04-21 22:59:12 -04001875 FTRACE_ITER_PRINTALL = (1 << 2),
1876 FTRACE_ITER_HASH = (1 << 3),
Steven Rostedt647bcd02011-05-03 14:39:21 -04001877 FTRACE_ITER_ENABLED = (1 << 4),
Steven Rostedt5072c592008-05-12 21:20:43 +02001878};
1879
1880#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1881
1882struct ftrace_iterator {
Steven Rostedt98c4fd02010-09-10 11:47:43 -04001883 loff_t pos;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001884 loff_t func_pos;
1885 struct ftrace_page *pg;
1886 struct dyn_ftrace *func;
1887 struct ftrace_func_probe *probe;
1888 struct trace_parser parser;
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04001889 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001890 struct ftrace_ops *ops;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001891 int hidx;
1892 int idx;
1893 unsigned flags;
Steven Rostedt5072c592008-05-12 21:20:43 +02001894};
1895
Ingo Molnare309b412008-05-12 21:20:51 +02001896static void *
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001897t_hash_next(struct seq_file *m, loff_t *pos)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001898{
1899 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001900 struct hlist_node *hnd = NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001901 struct hlist_head *hhd;
1902
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001903 (*pos)++;
Steven Rostedt98c4fd02010-09-10 11:47:43 -04001904 iter->pos = *pos;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001905
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001906 if (iter->probe)
1907 hnd = &iter->probe->node;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001908 retry:
1909 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1910 return NULL;
1911
1912 hhd = &ftrace_func_hash[iter->hidx];
1913
1914 if (hlist_empty(hhd)) {
1915 iter->hidx++;
1916 hnd = NULL;
1917 goto retry;
1918 }
1919
1920 if (!hnd)
1921 hnd = hhd->first;
1922 else {
1923 hnd = hnd->next;
1924 if (!hnd) {
1925 iter->hidx++;
1926 goto retry;
1927 }
1928 }
1929
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001930 if (WARN_ON_ONCE(!hnd))
1931 return NULL;
1932
1933 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1934
1935 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001936}
1937
1938static void *t_hash_start(struct seq_file *m, loff_t *pos)
1939{
1940 struct ftrace_iterator *iter = m->private;
1941 void *p = NULL;
Li Zefand82d6242009-06-24 09:54:54 +08001942 loff_t l;
1943
Steven Rostedt2bccfff2010-09-09 08:43:22 -04001944 if (iter->func_pos > *pos)
1945 return NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001946
Li Zefand82d6242009-06-24 09:54:54 +08001947 iter->hidx = 0;
Steven Rostedt2bccfff2010-09-09 08:43:22 -04001948 for (l = 0; l <= (*pos - iter->func_pos); ) {
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001949 p = t_hash_next(m, &l);
Li Zefand82d6242009-06-24 09:54:54 +08001950 if (!p)
1951 break;
1952 }
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001953 if (!p)
1954 return NULL;
1955
Steven Rostedt98c4fd02010-09-10 11:47:43 -04001956 /* Only set this if we have an item */
1957 iter->flags |= FTRACE_ITER_HASH;
1958
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001959 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001960}
1961
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001962static int
1963t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001964{
Steven Rostedtb6887d72009-02-17 12:32:04 -05001965 struct ftrace_func_probe *rec;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001966
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001967 rec = iter->probe;
1968 if (WARN_ON_ONCE(!rec))
1969 return -EIO;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001970
Steven Rostedt809dcf22009-02-16 23:06:01 -05001971 if (rec->ops->print)
1972 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1973
Steven Rostedtb375a112009-09-17 00:05:58 -04001974 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001975
1976 if (rec->data)
1977 seq_printf(m, ":%p", rec->data);
1978 seq_putc(m, '\n');
1979
1980 return 0;
1981}
1982
1983static void *
Steven Rostedt5072c592008-05-12 21:20:43 +02001984t_next(struct seq_file *m, void *v, loff_t *pos)
1985{
1986 struct ftrace_iterator *iter = m->private;
Steven Rostedtf45948e2011-05-02 12:29:25 -04001987 struct ftrace_ops *ops = &global_ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02001988 struct dyn_ftrace *rec = NULL;
1989
Steven Rostedt45a4a232011-04-21 23:16:46 -04001990 if (unlikely(ftrace_disabled))
1991 return NULL;
1992
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001993 if (iter->flags & FTRACE_ITER_HASH)
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001994 return t_hash_next(m, pos);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001995
Steven Rostedt5072c592008-05-12 21:20:43 +02001996 (*pos)++;
Jiri Olsa1106b692011-02-16 17:35:34 +01001997 iter->pos = iter->func_pos = *pos;
Steven Rostedt5072c592008-05-12 21:20:43 +02001998
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001999 if (iter->flags & FTRACE_ITER_PRINTALL)
Steven Rostedt57c072c2010-09-14 11:21:11 -04002000 return t_hash_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002001
Steven Rostedt5072c592008-05-12 21:20:43 +02002002 retry:
2003 if (iter->idx >= iter->pg->index) {
2004 if (iter->pg->next) {
2005 iter->pg = iter->pg->next;
2006 iter->idx = 0;
2007 goto retry;
2008 }
2009 } else {
2010 rec = &iter->pg->records[iter->idx++];
Steven Rostedta9fdda32008-08-14 22:47:17 -04002011 if ((rec->flags & FTRACE_FL_FREE) ||
2012
Steven Rostedt0183fb12008-11-07 22:36:02 -05002013 ((iter->flags & FTRACE_ITER_FILTER) &&
Steven Rostedtf45948e2011-05-02 12:29:25 -04002014 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
Steven Rostedt0183fb12008-11-07 22:36:02 -05002015
Steven Rostedt41c52c02008-05-22 11:46:33 -04002016 ((iter->flags & FTRACE_ITER_NOTRACE) &&
Steven Rostedt647bcd02011-05-03 14:39:21 -04002017 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2018
2019 ((iter->flags & FTRACE_ITER_ENABLED) &&
2020 !(rec->flags & ~FTRACE_FL_MASK))) {
2021
Steven Rostedt5072c592008-05-12 21:20:43 +02002022 rec = NULL;
2023 goto retry;
2024 }
2025 }
2026
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002027 if (!rec)
Steven Rostedt57c072c2010-09-14 11:21:11 -04002028 return t_hash_start(m, pos);
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002029
2030 iter->func = rec;
2031
2032 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02002033}
2034
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002035static void reset_iter_read(struct ftrace_iterator *iter)
2036{
2037 iter->pos = 0;
2038 iter->func_pos = 0;
2039	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
Steven Rostedt5072c592008-05-12 21:20:43 +02002040}
2041
2042static void *t_start(struct seq_file *m, loff_t *pos)
2043{
2044 struct ftrace_iterator *iter = m->private;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002045 struct ftrace_ops *ops = &global_ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02002046 void *p = NULL;
Li Zefan694ce0a2009-06-24 09:54:19 +08002047 loff_t l;
Steven Rostedt5072c592008-05-12 21:20:43 +02002048
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002049 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04002050
2051 if (unlikely(ftrace_disabled))
2052 return NULL;
2053
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002054 /*
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002055	 * If an lseek was done, then reset and start from the beginning.
2056 */
2057 if (*pos < iter->pos)
2058 reset_iter_read(iter);
2059
2060 /*
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002061 * For set_ftrace_filter reading, if we have the filter
2062 * off, we can short cut and just print out that all
2063 * functions are enabled.
2064 */
Steven Rostedtf45948e2011-05-02 12:29:25 -04002065 if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002066 if (*pos > 0)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002067 return t_hash_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002068 iter->flags |= FTRACE_ITER_PRINTALL;
Chris Wrightdf091622010-09-09 16:34:59 -07002069 /* reset in case of seek/pread */
2070 iter->flags &= ~FTRACE_ITER_HASH;
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002071 return iter;
2072 }
2073
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002074 if (iter->flags & FTRACE_ITER_HASH)
2075 return t_hash_start(m, pos);
2076
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002077 /*
2078 * Unfortunately, we need to restart at ftrace_pages_start
2079	 * every time we let go of ftrace_lock. This is because
2080 * those pointers can change without the lock.
2081 */
Li Zefan694ce0a2009-06-24 09:54:19 +08002082 iter->pg = ftrace_pages_start;
2083 iter->idx = 0;
2084 for (l = 0; l <= *pos; ) {
2085 p = t_next(m, p, &l);
2086 if (!p)
2087 break;
Liming Wang50cdaf02008-11-28 12:13:21 +08002088 }
walimis5821e1b2008-11-15 15:19:06 +08002089
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002090 if (!p) {
2091 if (iter->flags & FTRACE_ITER_FILTER)
2092 return t_hash_start(m, pos);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002093
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002094 return NULL;
2095 }
2096
2097 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02002098}
2099
2100static void t_stop(struct seq_file *m, void *p)
2101{
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002102 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002103}
2104
2105static int t_show(struct seq_file *m, void *v)
2106{
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002107 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002108 struct dyn_ftrace *rec;
Steven Rostedt5072c592008-05-12 21:20:43 +02002109
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002110 if (iter->flags & FTRACE_ITER_HASH)
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002111 return t_hash_show(m, iter);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002112
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002113 if (iter->flags & FTRACE_ITER_PRINTALL) {
2114 seq_printf(m, "#### all functions enabled ####\n");
2115 return 0;
2116 }
2117
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002118 rec = iter->func;
2119
Steven Rostedt5072c592008-05-12 21:20:43 +02002120 if (!rec)
2121 return 0;
2122
Steven Rostedt647bcd02011-05-03 14:39:21 -04002123 seq_printf(m, "%ps", (void *)rec->ip);
2124 if (iter->flags & FTRACE_ITER_ENABLED)
2125 seq_printf(m, " (%ld)",
2126 rec->flags & ~FTRACE_FL_MASK);
2127 seq_printf(m, "\n");
Steven Rostedt5072c592008-05-12 21:20:43 +02002128
2129 return 0;
2130}
2131
James Morris88e9d342009-09-22 16:43:43 -07002132static const struct seq_operations show_ftrace_seq_ops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02002133 .start = t_start,
2134 .next = t_next,
2135 .stop = t_stop,
2136 .show = t_show,
2137};
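
/*
 * These seq_file operations back the tracing debugfs files that expose
 * the record list: available_filter_functions and enabled_functions via
 * the open routines below, and set_ftrace_filter/set_ftrace_notrace via
 * ftrace_regex_open(). For example, with no filter set, reading the
 * filter file takes the FTRACE_ITER_PRINTALL shortcut seen in t_show():
 *
 *	# cat /sys/kernel/debug/tracing/set_ftrace_filter
 *	#### all functions enabled ####
 *
 * (path assumes debugfs is mounted at /sys/kernel/debug).
 */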
2138
Ingo Molnare309b412008-05-12 21:20:51 +02002139static int
Steven Rostedt5072c592008-05-12 21:20:43 +02002140ftrace_avail_open(struct inode *inode, struct file *file)
2141{
2142 struct ftrace_iterator *iter;
2143 int ret;
2144
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002145 if (unlikely(ftrace_disabled))
2146 return -ENODEV;
2147
Steven Rostedt5072c592008-05-12 21:20:43 +02002148 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2149 if (!iter)
2150 return -ENOMEM;
2151
2152 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +02002153
2154 ret = seq_open(file, &show_ftrace_seq_ops);
2155 if (!ret) {
2156 struct seq_file *m = file->private_data;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002157
Steven Rostedt5072c592008-05-12 21:20:43 +02002158 m->private = iter;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002159 } else {
Steven Rostedt5072c592008-05-12 21:20:43 +02002160 kfree(iter);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002161 }
Steven Rostedt5072c592008-05-12 21:20:43 +02002162
2163 return ret;
2164}
2165
Steven Rostedt647bcd02011-05-03 14:39:21 -04002166static int
2167ftrace_enabled_open(struct inode *inode, struct file *file)
2168{
2169 struct ftrace_iterator *iter;
2170 int ret;
2171
2172 if (unlikely(ftrace_disabled))
2173 return -ENODEV;
2174
2175 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2176 if (!iter)
2177 return -ENOMEM;
2178
2179 iter->pg = ftrace_pages_start;
2180 iter->flags = FTRACE_ITER_ENABLED;
2181
2182 ret = seq_open(file, &show_ftrace_seq_ops);
2183 if (!ret) {
2184 struct seq_file *m = file->private_data;
2185
2186 m->private = iter;
2187 } else {
2188 kfree(iter);
2189 }
2190
2191 return ret;
2192}
2193
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002194static void ftrace_filter_reset(struct ftrace_hash *hash)
Steven Rostedt5072c592008-05-12 21:20:43 +02002195{
Steven Rostedt52baf112009-02-14 01:15:39 -05002196 mutex_lock(&ftrace_lock);
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002197 ftrace_hash_clear(hash);
Steven Rostedt52baf112009-02-14 01:15:39 -05002198 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002199}
2200
Ingo Molnare309b412008-05-12 21:20:51 +02002201static int
Steven Rostedtf45948e2011-05-02 12:29:25 -04002202ftrace_regex_open(struct ftrace_ops *ops, int flag,
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002203 struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02002204{
2205 struct ftrace_iterator *iter;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002206 struct ftrace_hash *hash;
Steven Rostedt5072c592008-05-12 21:20:43 +02002207 int ret = 0;
2208
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002209 if (unlikely(ftrace_disabled))
2210 return -ENODEV;
2211
Steven Rostedt5072c592008-05-12 21:20:43 +02002212 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2213 if (!iter)
2214 return -ENOMEM;
2215
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002216 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2217 kfree(iter);
2218 return -ENOMEM;
2219 }
2220
Steven Rostedtf45948e2011-05-02 12:29:25 -04002221 if (flag & FTRACE_ITER_NOTRACE)
2222 hash = ops->notrace_hash;
2223 else
2224 hash = ops->filter_hash;
2225
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002226 iter->ops = ops;
2227 iter->flags = flag;
2228
2229 if (file->f_mode & FMODE_WRITE) {
2230 mutex_lock(&ftrace_lock);
2231 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2232 mutex_unlock(&ftrace_lock);
2233
2234 if (!iter->hash) {
2235 trace_parser_put(&iter->parser);
2236 kfree(iter);
2237 return -ENOMEM;
2238 }
2239 }
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002240
Steven Rostedt41c52c02008-05-22 11:46:33 -04002241 mutex_lock(&ftrace_regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002242
Steven Rostedt5072c592008-05-12 21:20:43 +02002243 if ((file->f_mode & FMODE_WRITE) &&
Steven Rostedt8650ae32009-07-22 23:29:30 -04002244 (file->f_flags & O_TRUNC))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002245 ftrace_filter_reset(iter->hash);
Steven Rostedt5072c592008-05-12 21:20:43 +02002246
2247 if (file->f_mode & FMODE_READ) {
2248 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +02002249
2250 ret = seq_open(file, &show_ftrace_seq_ops);
2251 if (!ret) {
2252 struct seq_file *m = file->private_data;
2253 m->private = iter;
Li Zefan79fe2492009-09-22 13:54:28 +08002254 } else {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002255 /* Failed */
2256 free_ftrace_hash(iter->hash);
Li Zefan79fe2492009-09-22 13:54:28 +08002257 trace_parser_put(&iter->parser);
Steven Rostedt5072c592008-05-12 21:20:43 +02002258 kfree(iter);
Li Zefan79fe2492009-09-22 13:54:28 +08002259 }
Steven Rostedt5072c592008-05-12 21:20:43 +02002260 } else
2261 file->private_data = iter;
Steven Rostedt41c52c02008-05-22 11:46:33 -04002262 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002263
2264 return ret;
2265}
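
/*
 * Design note: a writer never edits ops->filter_hash or ops->notrace_hash
 * in place. The open above duplicates the current hash into iter->hash,
 * everything parsed through this file modifies only that copy, and the
 * copy replaces the live hash in one step (ftrace_hash_move() under
 * ftrace_lock) when the update is committed. Readers racing with a write
 * therefore see either the old filter or the new one, never a
 * half-updated hash.
 */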
2266
Steven Rostedt41c52c02008-05-22 11:46:33 -04002267static int
2268ftrace_filter_open(struct inode *inode, struct file *file)
2269{
Steven Rostedtf45948e2011-05-02 12:29:25 -04002270 return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002271 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002272}
2273
2274static int
2275ftrace_notrace_open(struct inode *inode, struct file *file)
2276{
Steven Rostedtf45948e2011-05-02 12:29:25 -04002277 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002278 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002279}
2280
Ingo Molnare309b412008-05-12 21:20:51 +02002281static loff_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04002282ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
Steven Rostedt5072c592008-05-12 21:20:43 +02002283{
2284 loff_t ret;
2285
2286 if (file->f_mode & FMODE_READ)
2287 ret = seq_lseek(file, offset, origin);
2288 else
2289 file->f_pos = ret = 1;
2290
2291 return ret;
2292}
2293
Steven Rostedt64e7c442009-02-13 17:08:48 -05002294static int ftrace_match(char *str, char *regex, int len, int type)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002295{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002296 int matched = 0;
Li Zefan751e9982010-01-14 10:53:02 +08002297 int slen;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002298
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002299 switch (type) {
2300 case MATCH_FULL:
2301 if (strcmp(str, regex) == 0)
2302 matched = 1;
2303 break;
2304 case MATCH_FRONT_ONLY:
2305 if (strncmp(str, regex, len) == 0)
2306 matched = 1;
2307 break;
2308 case MATCH_MIDDLE_ONLY:
2309 if (strstr(str, regex))
2310 matched = 1;
2311 break;
2312 case MATCH_END_ONLY:
Li Zefan751e9982010-01-14 10:53:02 +08002313 slen = strlen(str);
2314 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002315 matched = 1;
2316 break;
2317 }
2318
2319 return matched;
2320}
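
/*
 * The 'type' argument comes from filter_parse_regex(), which maps the
 * limited glob syntax accepted by the filter files onto the cases above,
 * roughly:
 *
 *	"schedule"	MATCH_FULL		exact compare
 *	"sched*"	MATCH_FRONT_ONLY	strncmp() on the prefix
 *	"*sched*"	MATCH_MIDDLE_ONLY	strstr()
 *	"*_fork"	MATCH_END_ONLY		compare the tail
 */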
2321
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002322static int
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002323enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
Steven Rostedt996e87b2011-04-26 16:11:03 -04002324{
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002325 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002326 int ret = 0;
2327
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002328 entry = ftrace_lookup_ip(hash, rec->ip);
2329 if (not) {
2330 /* Do nothing if it doesn't exist */
2331 if (!entry)
2332 return 0;
2333
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002334 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002335 } else {
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002336 /* Do nothing if it exists */
2337 if (entry)
2338 return 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002339
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002340 ret = add_hash_entry(hash, rec->ip);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002341 }
2342 return ret;
Steven Rostedt996e87b2011-04-26 16:11:03 -04002343}
2344
Steven Rostedt64e7c442009-02-13 17:08:48 -05002345static int
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002346ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2347 char *regex, int len, int type)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002348{
2349 char str[KSYM_SYMBOL_LEN];
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002350 char *modname;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002351
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002352 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2353
2354 if (mod) {
2355 /* module lookup requires matching the module */
2356 if (!modname || strcmp(modname, mod))
2357 return 0;
2358
2359 /* blank search means to match all funcs in the mod */
2360 if (!len)
2361 return 1;
2362 }
2363
Steven Rostedt64e7c442009-02-13 17:08:48 -05002364 return ftrace_match(str, regex, len, type);
2365}
2366
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002367static int
2368match_records(struct ftrace_hash *hash, char *buff,
2369 int len, char *mod, int not)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002370{
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002371 unsigned search_len = 0;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002372 struct ftrace_page *pg;
2373 struct dyn_ftrace *rec;
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002374 int type = MATCH_FULL;
2375 char *search = buff;
Li Zefan311d16d2009-12-08 11:15:11 +08002376 int found = 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002377 int ret;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002378
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002379 if (len) {
2380 type = filter_parse_regex(buff, len, &search, &not);
2381 search_len = strlen(search);
2382 }
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002383
Steven Rostedt52baf112009-02-14 01:15:39 -05002384 mutex_lock(&ftrace_lock);
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002385
2386 if (unlikely(ftrace_disabled))
2387 goto out_unlock;
2388
Steven Rostedt265c8312009-02-13 12:43:56 -05002389 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt5072c592008-05-12 21:20:43 +02002390
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002391 if (ftrace_match_record(rec, mod, search, search_len, type)) {
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002392 ret = enter_record(hash, rec, not);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002393 if (ret < 0) {
2394 found = ret;
2395 goto out_unlock;
2396 }
Li Zefan311d16d2009-12-08 11:15:11 +08002397 found = 1;
Steven Rostedt265c8312009-02-13 12:43:56 -05002398 }
2399 } while_for_each_ftrace_rec();
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002400 out_unlock:
Steven Rostedt52baf112009-02-14 01:15:39 -05002401 mutex_unlock(&ftrace_lock);
Li Zefan311d16d2009-12-08 11:15:11 +08002402
2403 return found;
Steven Rostedt5072c592008-05-12 21:20:43 +02002404}
2405
Steven Rostedt64e7c442009-02-13 17:08:48 -05002406static int
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002407ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002408{
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002409 return match_records(hash, buff, len, NULL, 0);
Steven Rostedt64e7c442009-02-13 17:08:48 -05002410}
2411
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002412static int
2413ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002414{
Steven Rostedt64e7c442009-02-13 17:08:48 -05002415 int not = 0;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002416
Steven Rostedt64e7c442009-02-13 17:08:48 -05002417 /* blank or '*' mean the same */
2418 if (strcmp(buff, "*") == 0)
2419 buff[0] = 0;
2420
2421	/* handle the case of 'don't filter this module' */
2422 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2423 buff[0] = 0;
2424 not = 1;
2425 }
2426
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002427 return match_records(hash, buff, strlen(buff), mod, not);
Steven Rostedt64e7c442009-02-13 17:08:48 -05002428}
2429
Steven Rostedtf6180772009-02-14 00:40:25 -05002430/*
2431 * We register the module command as a template to show others how
2432 * to register a command as well.
2433 */
2434
2435static int
Steven Rostedt43dd61c2011-07-07 11:09:22 -04002436ftrace_mod_callback(struct ftrace_hash *hash,
2437 char *func, char *cmd, char *param, int enable)
Steven Rostedtf6180772009-02-14 00:40:25 -05002438{
2439 char *mod;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002440 int ret = -EINVAL;
Steven Rostedtf6180772009-02-14 00:40:25 -05002441
2442 /*
2443 * cmd == 'mod' because we only registered this func
2444 * for the 'mod' ftrace_func_command.
2445 * But if you register one func with multiple commands,
2446 * you can tell which command was used by the cmd
2447 * parameter.
2448 */
2449
2450 /* we must have a module name */
2451 if (!param)
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002452 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002453
2454 mod = strsep(&param, ":");
2455 if (!strlen(mod))
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002456 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002457
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002458 ret = ftrace_match_module_records(hash, func, mod);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002459 if (!ret)
2460 ret = -EINVAL;
2461 if (ret < 0)
2462 return ret;
2463
2464 return 0;
Steven Rostedtf6180772009-02-14 00:40:25 -05002465}
2466
2467static struct ftrace_func_command ftrace_mod_cmd = {
2468 .name = "mod",
2469 .func = ftrace_mod_callback,
2470};
2471
2472static int __init ftrace_mod_cmd_init(void)
2473{
2474 return register_ftrace_command(&ftrace_mod_cmd);
2475}
2476device_initcall(ftrace_mod_cmd_init);
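
/*
 * Example usage of the command registered above, as written into the
 * filter files (paths assume the usual debugfs mount point):
 *
 *	echo ':mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'write*:mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first selects every traceable function in the ext3 module, the
 * second only those in ext3 whose names start with "write".
 */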
2477
Steven Rostedt59df055f2009-02-14 15:29:06 -05002478static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002479function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002480{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002481 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002482 struct hlist_head *hhd;
2483 struct hlist_node *n;
2484 unsigned long key;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002485
2486 key = hash_long(ip, FTRACE_HASH_BITS);
2487
2488 hhd = &ftrace_func_hash[key];
2489
2490 if (hlist_empty(hhd))
2491 return;
2492
2493 /*
2494	 * Disable preemption for these calls to prevent an RCU grace
2495 * period. This syncs the hash iteration and freeing of items
2496 * on the hash. rcu_read_lock is too dangerous here.
2497 */
Steven Rostedt5168ae52010-06-03 09:36:50 -04002498 preempt_disable_notrace();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002499 hlist_for_each_entry_rcu(entry, n, hhd, node) {
2500 if (entry->ip == ip)
2501 entry->ops->func(ip, parent_ip, &entry->data);
2502 }
Steven Rostedt5168ae52010-06-03 09:36:50 -04002503 preempt_enable_notrace();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002504}
2505
Steven Rostedtb6887d72009-02-17 12:32:04 -05002506static struct ftrace_ops trace_probe_ops __read_mostly =
Steven Rostedt59df055f2009-02-14 15:29:06 -05002507{
Steven Rostedtfb9fb012009-03-25 13:26:41 -04002508 .func = function_trace_probe_call,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002509};
2510
Steven Rostedtb6887d72009-02-17 12:32:04 -05002511static int ftrace_probe_registered;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002512
Steven Rostedtb6887d72009-02-17 12:32:04 -05002513static void __enable_ftrace_function_probe(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002514{
Steven Rostedtb8489142011-05-04 09:27:52 -04002515 int ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002516 int i;
2517
Steven Rostedtb6887d72009-02-17 12:32:04 -05002518 if (ftrace_probe_registered)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002519 return;
2520
2521 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2522 struct hlist_head *hhd = &ftrace_func_hash[i];
2523 if (hhd->first)
2524 break;
2525 }
2526 /* Nothing registered? */
2527 if (i == FTRACE_FUNC_HASHSIZE)
2528 return;
2529
Steven Rostedtb8489142011-05-04 09:27:52 -04002530 ret = __register_ftrace_function(&trace_probe_ops);
2531 if (!ret)
Steven Rostedta1cd6172011-05-23 15:24:25 -04002532 ret = ftrace_startup(&trace_probe_ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04002533
Steven Rostedtb6887d72009-02-17 12:32:04 -05002534 ftrace_probe_registered = 1;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002535}
2536
Steven Rostedtb6887d72009-02-17 12:32:04 -05002537static void __disable_ftrace_function_probe(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002538{
Steven Rostedtb8489142011-05-04 09:27:52 -04002539 int ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002540 int i;
2541
Steven Rostedtb6887d72009-02-17 12:32:04 -05002542 if (!ftrace_probe_registered)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002543 return;
2544
2545 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2546 struct hlist_head *hhd = &ftrace_func_hash[i];
2547 if (hhd->first)
2548 return;
2549 }
2550
2551 /* no more funcs left */
Steven Rostedtb8489142011-05-04 09:27:52 -04002552 ret = __unregister_ftrace_function(&trace_probe_ops);
2553 if (!ret)
2554 ftrace_shutdown(&trace_probe_ops, 0);
2555
Steven Rostedtb6887d72009-02-17 12:32:04 -05002556 ftrace_probe_registered = 0;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002557}
2558
2559
2560static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2561{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002562 struct ftrace_func_probe *entry =
2563 container_of(rhp, struct ftrace_func_probe, rcu);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002564
2565 if (entry->ops->free)
2566 entry->ops->free(&entry->data);
2567 kfree(entry);
2568}
2569
2570
2571int
Steven Rostedtb6887d72009-02-17 12:32:04 -05002572register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002573 void *data)
2574{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002575 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002576 struct ftrace_page *pg;
2577 struct dyn_ftrace *rec;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002578 int type, len, not;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002579 unsigned long key;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002580 int count = 0;
2581 char *search;
2582
Frederic Weisbecker3f6fe062009-09-24 21:31:51 +02002583 type = filter_parse_regex(glob, strlen(glob), &search, &not);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002584 len = strlen(search);
2585
Steven Rostedtb6887d72009-02-17 12:32:04 -05002586 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002587 if (WARN_ON(not))
2588 return -EINVAL;
2589
2590 mutex_lock(&ftrace_lock);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002591
Steven Rostedt45a4a232011-04-21 23:16:46 -04002592 if (unlikely(ftrace_disabled))
2593 goto out_unlock;
2594
Steven Rostedt59df055f2009-02-14 15:29:06 -05002595 do_for_each_ftrace_rec(pg, rec) {
2596
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002597 if (!ftrace_match_record(rec, NULL, search, len, type))
Steven Rostedt59df055f2009-02-14 15:29:06 -05002598 continue;
2599
2600 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2601 if (!entry) {
Steven Rostedtb6887d72009-02-17 12:32:04 -05002602 /* If we did not process any, then return error */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002603 if (!count)
2604 count = -ENOMEM;
2605 goto out_unlock;
2606 }
2607
2608 count++;
2609
2610 entry->data = data;
2611
2612 /*
2613 * The caller might want to do something special
2614 * for each function we find. We call the callback
2615 * to give the caller an opportunity to do so.
2616 */
2617 if (ops->callback) {
2618 if (ops->callback(rec->ip, &entry->data) < 0) {
2619 /* caller does not like this func */
2620 kfree(entry);
2621 continue;
2622 }
2623 }
2624
2625 entry->ops = ops;
2626 entry->ip = rec->ip;
2627
2628 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2629 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2630
2631 } while_for_each_ftrace_rec();
Steven Rostedtb6887d72009-02-17 12:32:04 -05002632 __enable_ftrace_function_probe();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002633
2634 out_unlock:
2635 mutex_unlock(&ftrace_lock);
2636
2637 return count;
2638}
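
/*
 * A sketch of a probe user (the function tracer's traceon/traceoff
 * commands are registered this way; my_probe/my_probe_ops below are
 * illustrative names):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("sched*", &my_probe_ops, NULL);
 *
 * Every function matching "sched*" then invokes my_probe() through
 * function_trace_probe_call() above until the probe is unregistered.
 */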
2639
2640enum {
Steven Rostedtb6887d72009-02-17 12:32:04 -05002641 PROBE_TEST_FUNC = 1,
2642 PROBE_TEST_DATA = 2
Steven Rostedt59df055f2009-02-14 15:29:06 -05002643};
2644
2645static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002646__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002647 void *data, int flags)
2648{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002649 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002650 struct hlist_node *n, *tmp;
2651 char str[KSYM_SYMBOL_LEN];
2652 int type = MATCH_FULL;
2653 int i, len = 0;
2654 char *search;
2655
Atsushi Tsujib36461d2009-09-15 19:06:30 +09002656 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
Steven Rostedt59df055f2009-02-14 15:29:06 -05002657 glob = NULL;
Atsushi Tsujib36461d2009-09-15 19:06:30 +09002658 else if (glob) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05002659 int not;
2660
Frederic Weisbecker3f6fe062009-09-24 21:31:51 +02002661 type = filter_parse_regex(glob, strlen(glob), &search, &not);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002662 len = strlen(search);
2663
Steven Rostedtb6887d72009-02-17 12:32:04 -05002664 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002665 if (WARN_ON(not))
2666 return;
2667 }
2668
2669 mutex_lock(&ftrace_lock);
2670 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2671 struct hlist_head *hhd = &ftrace_func_hash[i];
2672
2673 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2674
2675 /* break up if statements for readability */
Steven Rostedtb6887d72009-02-17 12:32:04 -05002676 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002677 continue;
2678
Steven Rostedtb6887d72009-02-17 12:32:04 -05002679 if ((flags & PROBE_TEST_DATA) && entry->data != data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002680 continue;
2681
2682 /* do this last, since it is the most expensive */
2683 if (glob) {
2684 kallsyms_lookup(entry->ip, NULL, NULL,
2685 NULL, str);
2686 if (!ftrace_match(str, glob, len, type))
2687 continue;
2688 }
2689
2690 hlist_del(&entry->node);
2691 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2692 }
2693 }
Steven Rostedtb6887d72009-02-17 12:32:04 -05002694 __disable_ftrace_function_probe();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002695 mutex_unlock(&ftrace_lock);
2696}
2697
2698void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002699unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002700 void *data)
2701{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002702 __unregister_ftrace_function_probe(glob, ops, data,
2703 PROBE_TEST_FUNC | PROBE_TEST_DATA);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002704}
2705
2706void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002707unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002708{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002709 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002710}
2711
Steven Rostedtb6887d72009-02-17 12:32:04 -05002712void unregister_ftrace_function_probe_all(char *glob)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002713{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002714 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002715}
2716
Steven Rostedtf6180772009-02-14 00:40:25 -05002717static LIST_HEAD(ftrace_commands);
2718static DEFINE_MUTEX(ftrace_cmd_mutex);
2719
2720int register_ftrace_command(struct ftrace_func_command *cmd)
2721{
2722 struct ftrace_func_command *p;
2723 int ret = 0;
2724
2725 mutex_lock(&ftrace_cmd_mutex);
2726 list_for_each_entry(p, &ftrace_commands, list) {
2727 if (strcmp(cmd->name, p->name) == 0) {
2728 ret = -EBUSY;
2729 goto out_unlock;
2730 }
2731 }
2732 list_add(&cmd->list, &ftrace_commands);
2733 out_unlock:
2734 mutex_unlock(&ftrace_cmd_mutex);
2735
2736 return ret;
2737}
2738
2739int unregister_ftrace_command(struct ftrace_func_command *cmd)
2740{
2741 struct ftrace_func_command *p, *n;
2742 int ret = -ENODEV;
2743
2744 mutex_lock(&ftrace_cmd_mutex);
2745 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2746 if (strcmp(cmd->name, p->name) == 0) {
2747 ret = 0;
2748 list_del_init(&p->list);
2749 goto out_unlock;
2750 }
2751 }
2752 out_unlock:
2753 mutex_unlock(&ftrace_cmd_mutex);
2754
2755 return ret;
2756}
2757
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002758static int ftrace_process_regex(struct ftrace_hash *hash,
2759 char *buff, int len, int enable)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002760{
Steven Rostedtf6180772009-02-14 00:40:25 -05002761 char *func, *command, *next = buff;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002762 struct ftrace_func_command *p;
GuoWen Li0aff1c02011-06-01 19:18:47 +08002763 int ret = -EINVAL;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002764
2765 func = strsep(&next, ":");
2766
2767 if (!next) {
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002768 ret = ftrace_match_records(hash, func, len);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002769 if (!ret)
2770 ret = -EINVAL;
2771 if (ret < 0)
2772 return ret;
2773 return 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002774 }
2775
Steven Rostedtf6180772009-02-14 00:40:25 -05002776 /* command found */
Steven Rostedt64e7c442009-02-13 17:08:48 -05002777
2778 command = strsep(&next, ":");
2779
Steven Rostedtf6180772009-02-14 00:40:25 -05002780 mutex_lock(&ftrace_cmd_mutex);
2781 list_for_each_entry(p, &ftrace_commands, list) {
2782 if (strcmp(p->name, command) == 0) {
Steven Rostedt43dd61c2011-07-07 11:09:22 -04002783 ret = p->func(hash, func, command, next, enable);
Steven Rostedtf6180772009-02-14 00:40:25 -05002784 goto out_unlock;
2785 }
Steven Rostedt64e7c442009-02-13 17:08:48 -05002786 }
Steven Rostedtf6180772009-02-14 00:40:25 -05002787 out_unlock:
2788 mutex_unlock(&ftrace_cmd_mutex);
Steven Rostedt64e7c442009-02-13 17:08:48 -05002789
Steven Rostedtf6180772009-02-14 00:40:25 -05002790 return ret;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002791}
2792
Ingo Molnare309b412008-05-12 21:20:51 +02002793static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04002794ftrace_regex_write(struct file *file, const char __user *ubuf,
2795 size_t cnt, loff_t *ppos, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02002796{
2797 struct ftrace_iterator *iter;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002798 struct trace_parser *parser;
2799 ssize_t ret, read;
Steven Rostedt5072c592008-05-12 21:20:43 +02002800
Li Zefan4ba79782009-09-22 13:52:20 +08002801 if (!cnt)
Steven Rostedt5072c592008-05-12 21:20:43 +02002802 return 0;
2803
Steven Rostedt41c52c02008-05-22 11:46:33 -04002804 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002805
Steven Rostedt45a4a232011-04-21 23:16:46 -04002806 ret = -ENODEV;
2807 if (unlikely(ftrace_disabled))
2808 goto out_unlock;
2809
Steven Rostedt5072c592008-05-12 21:20:43 +02002810 if (file->f_mode & FMODE_READ) {
2811 struct seq_file *m = file->private_data;
2812 iter = m->private;
2813 } else
2814 iter = file->private_data;
2815
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002816 parser = &iter->parser;
2817 read = trace_get_user(parser, ubuf, cnt, ppos);
Steven Rostedt5072c592008-05-12 21:20:43 +02002818
Li Zefan4ba79782009-09-22 13:52:20 +08002819 if (read >= 0 && trace_parser_loaded(parser) &&
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002820 !trace_parser_cont(parser)) {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002821 ret = ftrace_process_regex(iter->hash, parser->buffer,
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002822 parser->idx, enable);
Li Zefan313254a2009-12-08 11:15:30 +08002823 trace_parser_clear(parser);
Steven Rostedt5072c592008-05-12 21:20:43 +02002824 if (ret)
Li Zefaned146b22009-11-03 08:55:38 +08002825 goto out_unlock;
Steven Rostedt5072c592008-05-12 21:20:43 +02002826 }
2827
Steven Rostedt5072c592008-05-12 21:20:43 +02002828 ret = read;
Li Zefaned146b22009-11-03 08:55:38 +08002829out_unlock:
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002830 mutex_unlock(&ftrace_regex_lock);
Li Zefaned146b22009-11-03 08:55:38 +08002831
Steven Rostedt5072c592008-05-12 21:20:43 +02002832 return ret;
2833}
2834
Steven Rostedt41c52c02008-05-22 11:46:33 -04002835static ssize_t
2836ftrace_filter_write(struct file *file, const char __user *ubuf,
2837 size_t cnt, loff_t *ppos)
2838{
2839 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2840}
2841
2842static ssize_t
2843ftrace_notrace_write(struct file *file, const char __user *ubuf,
2844 size_t cnt, loff_t *ppos)
2845{
2846 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2847}
2848
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002849static int
Steven Rostedtf45948e2011-05-02 12:29:25 -04002850ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2851 int reset, int enable)
Steven Rostedt41c52c02008-05-22 11:46:33 -04002852{
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002853 struct ftrace_hash **orig_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002854 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002855 int ret;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002856
Steven Rostedt936e0742011-05-05 22:54:01 -04002857 /* All global ops use the global ops filters */
2858 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2859 ops = &global_ops;
2860
Steven Rostedt41c52c02008-05-22 11:46:33 -04002861 if (unlikely(ftrace_disabled))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002862 return -ENODEV;
Steven Rostedt41c52c02008-05-22 11:46:33 -04002863
Steven Rostedtf45948e2011-05-02 12:29:25 -04002864 if (enable)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002865 orig_hash = &ops->filter_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002866 else
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002867 orig_hash = &ops->notrace_hash;
2868
2869 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2870 if (!hash)
2871 return -ENOMEM;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002872
Steven Rostedt41c52c02008-05-22 11:46:33 -04002873 mutex_lock(&ftrace_regex_lock);
2874 if (reset)
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002875 ftrace_filter_reset(hash);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002876 if (buf)
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002877 ftrace_match_records(hash, buf, len);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002878
2879 mutex_lock(&ftrace_lock);
2880 ret = ftrace_hash_move(orig_hash, hash);
2881 mutex_unlock(&ftrace_lock);
2882
Steven Rostedt41c52c02008-05-22 11:46:33 -04002883 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002884
2885 free_ftrace_hash(hash);
2886 return ret;
Steven Rostedt41c52c02008-05-22 11:46:33 -04002887}
2888
Steven Rostedt77a2b372008-05-12 21:20:45 +02002889/**
2890 * ftrace_set_filter - set a function to filter on in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04002891 * @ops - the ops to set the filter with
Steven Rostedt77a2b372008-05-12 21:20:45 +02002892 * @buf - the string that holds the function filter text.
2893 * @len - the length of the string.
2894 * @reset - non zero to reset all filters before applying this filter.
2895 *
2896 * Filters denote which functions should be enabled when tracing is enabled.
2897 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2898 */
Steven Rostedt936e0742011-05-05 22:54:01 -04002899void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2900 int len, int reset)
Steven Rostedt77a2b372008-05-12 21:20:45 +02002901{
Steven Rostedt936e0742011-05-05 22:54:01 -04002902 ftrace_set_regex(ops, buf, len, reset, 1);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002903}
Steven Rostedt936e0742011-05-05 22:54:01 -04002904EXPORT_SYMBOL_GPL(ftrace_set_filter);
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002905
Steven Rostedt41c52c02008-05-22 11:46:33 -04002906/**
2907 * ftrace_set_notrace - set a function to not trace in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04002908 * @ops - the ops to set the notrace filter with
Steven Rostedt41c52c02008-05-22 11:46:33 -04002909 * @buf - the string that holds the function notrace text.
2910 * @len - the length of the string.
2911 * @reset - non zero to reset all filters before applying this filter.
2912 *
2913 * Notrace Filters denote which functions should not be enabled when tracing
2914 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2915 * for tracing.
2916 */
Steven Rostedt936e0742011-05-05 22:54:01 -04002917void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2918 int len, int reset)
2919{
2920 ftrace_set_regex(ops, buf, len, reset, 0);
2921}
 2922EXPORT_SYMBOL_GPL(ftrace_set_notrace);

 2923/**
 2924 * ftrace_set_global_filter - set a function to filter on in ftrace
2926 * @buf - the string that holds the function filter text.
2927 * @len - the length of the string.
2928 * @reset - non zero to reset all filters before applying this filter.
2929 *
2930 * Filters denote which functions should be enabled when tracing is enabled.
2931 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2932 */
2933void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2934{
2935 ftrace_set_regex(&global_ops, buf, len, reset, 1);
2936}
2937EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2938
2939/**
 2940 * ftrace_set_global_notrace - set a function to not trace in ftrace
2942 * @buf - the string that holds the function notrace text.
2943 * @len - the length of the string.
2944 * @reset - non zero to reset all filters before applying this filter.
2945 *
2946 * Notrace Filters denote which functions should not be enabled when tracing
2947 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2948 * for tracing.
2949 */
2950void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
Steven Rostedt41c52c02008-05-22 11:46:33 -04002951{
Steven Rostedtf45948e2011-05-02 12:29:25 -04002952 ftrace_set_regex(&global_ops, buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02002953}
Steven Rostedt936e0742011-05-05 22:54:01 -04002954EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
Steven Rostedt77a2b372008-05-12 21:20:45 +02002955
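/*
 * Illustrative sketch (not part of this file): priming the global ops
 * filters from built-in code with the helpers above.  The function names
 * and glob pattern are arbitrary examples.
 */
static void __init example_global_filter_setup(void)
{
	/* Reset the filter and trace only kmalloc ... */
	ftrace_set_global_filter((unsigned char *)"kmalloc",
				 strlen("kmalloc"), 1);
	/* ... plus kfree (reset == 0 appends to the existing filter) ... */
	ftrace_set_global_filter((unsigned char *)"kfree",
				 strlen("kfree"), 0);
	/* ... and never trace anything matching the spinlock glob. */
	ftrace_set_global_notrace((unsigned char *)"*spin_lock*",
				  strlen("*spin_lock*"), 0);
}
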
Steven Rostedt2af15d62009-05-28 13:37:24 -04002956/*
2957 * command line interface to allow users to set filters on boot up.
2958 */
2959#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2960static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2961static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2962
2963static int __init set_ftrace_notrace(char *str)
2964{
2965 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2966 return 1;
2967}
2968__setup("ftrace_notrace=", set_ftrace_notrace);
2969
2970static int __init set_ftrace_filter(char *str)
2971{
2972 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2973 return 1;
2974}
2975__setup("ftrace_filter=", set_ftrace_filter);
2976
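/*
 * Example (illustrative): with the two __setup() handlers above, the
 * filters can be primed from the kernel command line, e.g.
 *
 *	ftrace_filter=kmalloc,kfree ftrace_notrace=*spin_lock*
 *
 * The strings are only copied at parse time; they are split on commas
 * and applied by set_ftrace_early_filters() during ftrace_init().
 */
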
Stefan Assmann369bc182009-10-12 22:17:21 +02002977#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Lai Jiangshanf6060f42009-11-05 11:16:17 +08002978static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
Steven Rostedt801c29f2010-03-05 20:02:19 -05002979static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2980
Stefan Assmann369bc182009-10-12 22:17:21 +02002981static int __init set_graph_function(char *str)
2982{
Frederic Weisbecker06f43d62009-10-14 20:43:39 +02002983 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
Stefan Assmann369bc182009-10-12 22:17:21 +02002984 return 1;
2985}
2986__setup("ftrace_graph_filter=", set_graph_function);
2987
2988static void __init set_ftrace_early_graph(char *buf)
2989{
2990 int ret;
2991 char *func;
2992
2993 while (buf) {
2994 func = strsep(&buf, ",");
2995 /* we allow only one expression at a time */
2996 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2997 func);
2998 if (ret)
2999 printk(KERN_DEBUG "ftrace: function %s not "
3000 "traceable\n", func);
3001 }
3002}
3003#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3004
Steven Rostedtf45948e2011-05-02 12:29:25 -04003005static void __init
3006set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
Steven Rostedt2af15d62009-05-28 13:37:24 -04003007{
3008 char *func;
3009
3010 while (buf) {
3011 func = strsep(&buf, ",");
Steven Rostedtf45948e2011-05-02 12:29:25 -04003012 ftrace_set_regex(ops, func, strlen(func), 0, enable);
Steven Rostedt2af15d62009-05-28 13:37:24 -04003013 }
3014}
3015
3016static void __init set_ftrace_early_filters(void)
3017{
3018 if (ftrace_filter_buf[0])
Steven Rostedtf45948e2011-05-02 12:29:25 -04003019 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
Steven Rostedt2af15d62009-05-28 13:37:24 -04003020 if (ftrace_notrace_buf[0])
Steven Rostedtf45948e2011-05-02 12:29:25 -04003021 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02003022#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3023 if (ftrace_graph_buf[0])
3024 set_ftrace_early_graph(ftrace_graph_buf);
3025#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
Steven Rostedt2af15d62009-05-28 13:37:24 -04003026}
3027
Ingo Molnare309b412008-05-12 21:20:51 +02003028static int
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04003029ftrace_regex_release(struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02003030{
3031 struct seq_file *m = (struct seq_file *)file->private_data;
3032 struct ftrace_iterator *iter;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003033 struct ftrace_hash **orig_hash;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003034 struct trace_parser *parser;
Steven Rostedted926f92011-05-03 13:25:24 -04003035 int filter_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003036 int ret;
Steven Rostedt5072c592008-05-12 21:20:43 +02003037
Steven Rostedt41c52c02008-05-22 11:46:33 -04003038 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003039 if (file->f_mode & FMODE_READ) {
3040 iter = m->private;
3041
3042 seq_release(inode, file);
3043 } else
3044 iter = file->private_data;
3045
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003046 parser = &iter->parser;
3047 if (trace_parser_loaded(parser)) {
3048 parser->buffer[parser->idx] = 0;
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04003049 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
Steven Rostedt5072c592008-05-12 21:20:43 +02003050 }
3051
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003052 trace_parser_put(parser);
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003053
Steven Rostedt058e2972011-04-29 22:35:33 -04003054 if (file->f_mode & FMODE_WRITE) {
Steven Rostedted926f92011-05-03 13:25:24 -04003055 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3056
3057 if (filter_hash)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003058 orig_hash = &iter->ops->filter_hash;
Steven Rostedted926f92011-05-03 13:25:24 -04003059 else
3060 orig_hash = &iter->ops->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003061
Steven Rostedt058e2972011-04-29 22:35:33 -04003062 mutex_lock(&ftrace_lock);
Steven Rostedted926f92011-05-03 13:25:24 -04003063 /*
3064 * Remove the current set, update the hash and add
3065 * them back.
3066 */
3067 ftrace_hash_rec_disable(iter->ops, filter_hash);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003068 ret = ftrace_hash_move(orig_hash, iter->hash);
Steven Rostedted926f92011-05-03 13:25:24 -04003069 if (!ret) {
3070 ftrace_hash_rec_enable(iter->ops, filter_hash);
3071 if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
3072 && ftrace_enabled)
3073 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3074 }
Steven Rostedt058e2972011-04-29 22:35:33 -04003075 mutex_unlock(&ftrace_lock);
3076 }
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003077 free_ftrace_hash(iter->hash);
3078 kfree(iter);
Steven Rostedt058e2972011-04-29 22:35:33 -04003079
Steven Rostedt41c52c02008-05-22 11:46:33 -04003080 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003081 return 0;
3082}
3083
Steven Rostedt5e2336a02009-03-05 21:44:55 -05003084static const struct file_operations ftrace_avail_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02003085 .open = ftrace_avail_open,
3086 .read = seq_read,
3087 .llseek = seq_lseek,
Li Zefan3be04b42009-08-17 16:54:03 +08003088 .release = seq_release_private,
Steven Rostedt5072c592008-05-12 21:20:43 +02003089};
3090
Steven Rostedt647bcd02011-05-03 14:39:21 -04003091static const struct file_operations ftrace_enabled_fops = {
3092 .open = ftrace_enabled_open,
3093 .read = seq_read,
3094 .llseek = seq_lseek,
3095 .release = seq_release_private,
3096};
3097
Steven Rostedt5e2336a02009-03-05 21:44:55 -05003098static const struct file_operations ftrace_filter_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02003099 .open = ftrace_filter_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08003100 .read = seq_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02003101 .write = ftrace_filter_write,
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003102 .llseek = ftrace_regex_lseek,
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04003103 .release = ftrace_regex_release,
Steven Rostedt5072c592008-05-12 21:20:43 +02003104};
3105
Steven Rostedt5e2336a02009-03-05 21:44:55 -05003106static const struct file_operations ftrace_notrace_fops = {
Steven Rostedt41c52c02008-05-22 11:46:33 -04003107 .open = ftrace_notrace_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08003108 .read = seq_read,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003109 .write = ftrace_notrace_write,
3110 .llseek = ftrace_regex_lseek,
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04003111 .release = ftrace_regex_release,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003112};
3113
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003114#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3115
3116static DEFINE_MUTEX(graph_lock);
3117
3118int ftrace_graph_count;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003119int ftrace_graph_filter_enabled;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003120unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3121
3122static void *
Li Zefan85951842009-06-24 09:54:00 +08003123__g_next(struct seq_file *m, loff_t *pos)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003124{
Li Zefan85951842009-06-24 09:54:00 +08003125 if (*pos >= ftrace_graph_count)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003126 return NULL;
Li Zefana4ec5e02009-09-18 14:06:28 +08003127 return &ftrace_graph_funcs[*pos];
Li Zefan85951842009-06-24 09:54:00 +08003128}
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003129
Li Zefan85951842009-06-24 09:54:00 +08003130static void *
3131g_next(struct seq_file *m, void *v, loff_t *pos)
3132{
3133 (*pos)++;
3134 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003135}
3136
3137static void *g_start(struct seq_file *m, loff_t *pos)
3138{
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003139 mutex_lock(&graph_lock);
3140
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003141 /* Nothing to filter on; tell g_show to print that all functions are enabled */
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003142 if (!ftrace_graph_filter_enabled && !*pos)
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003143 return (void *)1;
3144
Li Zefan85951842009-06-24 09:54:00 +08003145 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003146}
3147
3148static void g_stop(struct seq_file *m, void *p)
3149{
3150 mutex_unlock(&graph_lock);
3151}
3152
3153static int g_show(struct seq_file *m, void *v)
3154{
3155 unsigned long *ptr = v;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003156
3157 if (!ptr)
3158 return 0;
3159
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003160 if (ptr == (unsigned long *)1) {
3161 seq_printf(m, "#### all functions enabled ####\n");
3162 return 0;
3163 }
3164
Steven Rostedtb375a112009-09-17 00:05:58 -04003165 seq_printf(m, "%ps\n", (void *)*ptr);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003166
3167 return 0;
3168}
3169
James Morris88e9d342009-09-22 16:43:43 -07003170static const struct seq_operations ftrace_graph_seq_ops = {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003171 .start = g_start,
3172 .next = g_next,
3173 .stop = g_stop,
3174 .show = g_show,
3175};
3176
3177static int
3178ftrace_graph_open(struct inode *inode, struct file *file)
3179{
3180 int ret = 0;
3181
3182 if (unlikely(ftrace_disabled))
3183 return -ENODEV;
3184
3185 mutex_lock(&graph_lock);
3186 if ((file->f_mode & FMODE_WRITE) &&
Steven Rostedt8650ae32009-07-22 23:29:30 -04003187 (file->f_flags & O_TRUNC)) {
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003188 ftrace_graph_filter_enabled = 0;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003189 ftrace_graph_count = 0;
3190 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3191 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003192 mutex_unlock(&graph_lock);
3193
Li Zefana4ec5e02009-09-18 14:06:28 +08003194 if (file->f_mode & FMODE_READ)
3195 ret = seq_open(file, &ftrace_graph_seq_ops);
3196
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003197 return ret;
3198}
3199
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003200static int
Li Zefan87827112009-07-23 11:29:11 +08003201ftrace_graph_release(struct inode *inode, struct file *file)
3202{
3203 if (file->f_mode & FMODE_READ)
3204 seq_release(inode, file);
3205 return 0;
3206}
3207
3208static int
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003209ftrace_set_func(unsigned long *array, int *idx, char *buffer)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003210{
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003211 struct dyn_ftrace *rec;
3212 struct ftrace_page *pg;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003213 int search_len;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003214 int fail = 1;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003215 int type, not;
3216 char *search;
3217 bool exists;
3218 int i;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003219
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003220 /* decode regex */
Frederic Weisbecker3f6fe062009-09-24 21:31:51 +02003221 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003222 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3223 return -EBUSY;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003224
3225 search_len = strlen(search);
3226
Steven Rostedt52baf112009-02-14 01:15:39 -05003227 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04003228
3229 if (unlikely(ftrace_disabled)) {
3230 mutex_unlock(&ftrace_lock);
3231 return -ENODEV;
3232 }
3233
Steven Rostedt265c8312009-02-13 12:43:56 -05003234 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003235
Steven Rostedt45a4a232011-04-21 23:16:46 -04003236 if (rec->flags & FTRACE_FL_FREE)
Steven Rostedt265c8312009-02-13 12:43:56 -05003237 continue;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003238
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003239 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003240 /* if it is in the array */
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003241 exists = false;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003242 for (i = 0; i < *idx; i++) {
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003243 if (array[i] == rec->ip) {
3244 exists = true;
Steven Rostedt265c8312009-02-13 12:43:56 -05003245 break;
3246 }
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003247 }
3248
3249 if (!not) {
3250 fail = 0;
3251 if (!exists) {
3252 array[(*idx)++] = rec->ip;
3253 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3254 goto out;
3255 }
3256 } else {
3257 if (exists) {
3258 array[i] = array[--(*idx)];
3259 array[*idx] = 0;
3260 fail = 0;
3261 }
3262 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003263 }
Steven Rostedt265c8312009-02-13 12:43:56 -05003264 } while_for_each_ftrace_rec();
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003265out:
Steven Rostedt52baf112009-02-14 01:15:39 -05003266 mutex_unlock(&ftrace_lock);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003267
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003268 if (fail)
3269 return -EINVAL;
3270
3271 ftrace_graph_filter_enabled = 1;
3272 return 0;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003273}
3274
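/*
 * Illustrative sketch (not part of this file): roughly how the
 * ftrace_graph_funcs[] array filled in above is consulted by the graph
 * tracer (cf. ftrace_graph_addr() in trace.h).  An empty filter means
 * every function gets graph traced.
 */
static inline bool example_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return true;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (ftrace_graph_funcs[i] == addr)
			return true;
	}
	return false;
}
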
3275static ssize_t
3276ftrace_graph_write(struct file *file, const char __user *ubuf,
3277 size_t cnt, loff_t *ppos)
3278{
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003279 struct trace_parser parser;
Li Zefan4ba79782009-09-22 13:52:20 +08003280 ssize_t read, ret;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003281
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003282 if (!cnt)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003283 return 0;
3284
3285 mutex_lock(&graph_lock);
3286
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003287 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3288 ret = -ENOMEM;
Li Zefan1eb90f12009-09-22 13:52:57 +08003289 goto out_unlock;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003290 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003291
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003292 read = trace_get_user(&parser, ubuf, cnt, ppos);
3293
Li Zefan4ba79782009-09-22 13:52:20 +08003294 if (read >= 0 && trace_parser_loaded((&parser))) {
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003295 parser.buffer[parser.idx] = 0;
3296
3297 /* we allow only one expression at a time */
Li Zefana4ec5e02009-09-18 14:06:28 +08003298 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003299 parser.buffer);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003300 if (ret)
Li Zefan1eb90f12009-09-22 13:52:57 +08003301 goto out_free;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003302 }
3303
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003304 ret = read;
Li Zefan1eb90f12009-09-22 13:52:57 +08003305
3306out_free:
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003307 trace_parser_put(&parser);
Li Zefan1eb90f12009-09-22 13:52:57 +08003308out_unlock:
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003309 mutex_unlock(&graph_lock);
3310
3311 return ret;
3312}
3313
3314static const struct file_operations ftrace_graph_fops = {
Li Zefan87827112009-07-23 11:29:11 +08003315 .open = ftrace_graph_open,
3316 .read = seq_read,
3317 .write = ftrace_graph_write,
3318 .release = ftrace_graph_release,
Arnd Bergmann6038f372010-08-15 18:52:59 +02003319 .llseek = seq_lseek,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003320};
3321#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3322
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003323static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02003324{
Steven Rostedt5072c592008-05-12 21:20:43 +02003325
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003326 trace_create_file("available_filter_functions", 0444,
3327 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02003328
Steven Rostedt647bcd02011-05-03 14:39:21 -04003329 trace_create_file("enabled_functions", 0444,
3330 d_tracer, NULL, &ftrace_enabled_fops);
3331
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003332 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3333 NULL, &ftrace_filter_fops);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003334
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003335 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003336 NULL, &ftrace_notrace_fops);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04003337
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003338#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003339 trace_create_file("set_graph_function", 0444, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003340 NULL,
3341 &ftrace_graph_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003342#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3343
Steven Rostedt5072c592008-05-12 21:20:43 +02003344 return 0;
3345}
3346
Jiri Olsa5cb084b2009-10-13 16:33:53 -04003347static int ftrace_process_locs(struct module *mod,
Steven Rostedt31e88902008-11-14 16:21:19 -08003348 unsigned long *start,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003349 unsigned long *end)
3350{
3351 unsigned long *p;
3352 unsigned long addr;
Steven Rostedta4f18ed2011-06-07 09:26:46 -04003353 unsigned long flags;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003354
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003355 mutex_lock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003356 p = start;
3357 while (p < end) {
3358 addr = ftrace_call_adjust(*p++);
Steven Rostedt20e52272008-11-14 16:21:19 -08003359 /*
3360 * Some architecture linkers will pad between
3361 * the different mcount_loc sections of different
3362 * object files to satisfy alignments.
3363 * Skip any NULL pointers.
3364 */
3365 if (!addr)
3366 continue;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003367 ftrace_record_ip(addr);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003368 }
3369
Steven Rostedta4f18ed2011-06-07 09:26:46 -04003370 /*
3371 * Disable interrupts to prevent interrupts from executing
3372 * code that is being modified.
3373 */
3374 local_irq_save(flags);
Steven Rostedt31e88902008-11-14 16:21:19 -08003375 ftrace_update_code(mod);
Steven Rostedta4f18ed2011-06-07 09:26:46 -04003376 local_irq_restore(flags);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003377 mutex_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003378
3379 return 0;
3380}
3381
Steven Rostedt93eb6772009-04-15 13:24:06 -04003382#ifdef CONFIG_MODULES
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003383void ftrace_release_mod(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04003384{
3385 struct dyn_ftrace *rec;
3386 struct ftrace_page *pg;
Steven Rostedt93eb6772009-04-15 13:24:06 -04003387
Steven Rostedt93eb6772009-04-15 13:24:06 -04003388 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04003389
3390 if (ftrace_disabled)
3391 goto out_unlock;
3392
Steven Rostedt93eb6772009-04-15 13:24:06 -04003393 do_for_each_ftrace_rec(pg, rec) {
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003394 if (within_module_core(rec->ip, mod)) {
Steven Rostedt93eb6772009-04-15 13:24:06 -04003395 /*
3396 * rec->ip is changed in ftrace_free_rec()
 3397 * It should no longer be within the module's range if the record was freed.
3398 */
3399 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3400 ftrace_free_rec(rec);
3401 }
3402 } while_for_each_ftrace_rec();
Steven Rostedt45a4a232011-04-21 23:16:46 -04003403 out_unlock:
Steven Rostedt93eb6772009-04-15 13:24:06 -04003404 mutex_unlock(&ftrace_lock);
3405}
3406
3407static void ftrace_init_module(struct module *mod,
3408 unsigned long *start, unsigned long *end)
Steven Rostedt90d595f2008-08-14 15:45:09 -04003409{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04003410 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04003411 return;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04003412 ftrace_process_locs(mod, start, end);
Steven Rostedt90d595f2008-08-14 15:45:09 -04003413}
3414
Steven Rostedt93eb6772009-04-15 13:24:06 -04003415static int ftrace_module_notify(struct notifier_block *self,
3416 unsigned long val, void *data)
3417{
3418 struct module *mod = data;
3419
3420 switch (val) {
3421 case MODULE_STATE_COMING:
3422 ftrace_init_module(mod, mod->ftrace_callsites,
3423 mod->ftrace_callsites +
3424 mod->num_ftrace_callsites);
3425 break;
3426 case MODULE_STATE_GOING:
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003427 ftrace_release_mod(mod);
Steven Rostedt93eb6772009-04-15 13:24:06 -04003428 break;
3429 }
3430
3431 return 0;
3432}
3433#else
3434static int ftrace_module_notify(struct notifier_block *self,
3435 unsigned long val, void *data)
3436{
3437 return 0;
3438}
3439#endif /* CONFIG_MODULES */
3440
3441struct notifier_block ftrace_module_nb = {
3442 .notifier_call = ftrace_module_notify,
3443 .priority = 0,
3444};
3445
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003446extern unsigned long __start_mcount_loc[];
3447extern unsigned long __stop_mcount_loc[];
3448
3449void __init ftrace_init(void)
3450{
3451 unsigned long count, addr, flags;
3452 int ret;
3453
3454 /* Keep the ftrace pointer to the stub */
3455 addr = (unsigned long)ftrace_stub;
3456
3457 local_irq_save(flags);
3458 ftrace_dyn_arch_init(&addr);
3459 local_irq_restore(flags);
3460
3461 /* ftrace_dyn_arch_init places the return code in addr */
3462 if (addr)
3463 goto failed;
3464
3465 count = __stop_mcount_loc - __start_mcount_loc;
3466
3467 ret = ftrace_dyn_table_alloc(count);
3468 if (ret)
3469 goto failed;
3470
3471 last_ftrace_enabled = ftrace_enabled = 1;
3472
Jiri Olsa5cb084b2009-10-13 16:33:53 -04003473 ret = ftrace_process_locs(NULL,
Steven Rostedt31e88902008-11-14 16:21:19 -08003474 __start_mcount_loc,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003475 __stop_mcount_loc);
3476
Steven Rostedt93eb6772009-04-15 13:24:06 -04003477 ret = register_module_notifier(&ftrace_module_nb);
Ming Lei24ed0c42009-05-17 15:31:38 +08003478 if (ret)
Steven Rostedt93eb6772009-04-15 13:24:06 -04003479 pr_warning("Failed to register ftrace module notifier\n");
3480
Steven Rostedt2af15d62009-05-28 13:37:24 -04003481 set_ftrace_early_filters();
3482
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003483 return;
3484 failed:
3485 ftrace_disabled = 1;
3486}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003487
Steven Rostedt3d083392008-05-12 21:20:42 +02003488#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01003489
Steven Rostedt2b499382011-05-03 22:49:52 -04003490static struct ftrace_ops global_ops = {
Steven Rostedtbd69c302011-05-03 21:55:54 -04003491 .func = ftrace_stub,
3492};
3493
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01003494static int __init ftrace_nodyn_init(void)
3495{
3496 ftrace_enabled = 1;
3497 return 0;
3498}
3499device_initcall(ftrace_nodyn_init);
3500
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003501static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3502static inline void ftrace_startup_enable(int command) { }
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05003503/* Keep as macros so we do not need to define the commands */
Steven Rostedt3b6cfdb2011-05-23 15:33:49 -04003504# define ftrace_startup(ops, command) \
3505 ({ \
3506 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3507 0; \
3508 })
Steven Rostedtbd69c302011-05-03 21:55:54 -04003509# define ftrace_shutdown(ops, command) do { } while (0)
Ingo Molnarc7aafc52008-05-12 21:20:45 +02003510# define ftrace_startup_sysctl() do { } while (0)
3511# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedtb8489142011-05-04 09:27:52 -04003512
3513static inline int
3514ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3515{
3516 return 1;
3517}
3518
Steven Rostedt3d083392008-05-12 21:20:42 +02003519#endif /* CONFIG_DYNAMIC_FTRACE */
3520
Steven Rostedtb8489142011-05-04 09:27:52 -04003521static void
3522ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3523{
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003524 struct ftrace_ops *op;
Steven Rostedtb8489142011-05-04 09:27:52 -04003525
Steven Rostedtb1cff0a2011-05-25 14:27:43 -04003526 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3527 return;
3528
3529 trace_recursion_set(TRACE_INTERNAL_BIT);
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003530 /*
 3531 * Some of the ops may be dynamically allocated;
 3532 * they must not be freed until after a synchronize_sched().
3533 */
3534 preempt_disable_notrace();
3535 op = rcu_dereference_raw(ftrace_ops_list);
Steven Rostedtb8489142011-05-04 09:27:52 -04003536 while (op != &ftrace_list_end) {
3537 if (ftrace_ops_test(op, ip))
3538 op->func(ip, parent_ip);
3539 op = rcu_dereference_raw(op->next);
 3540 }
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003541 preempt_enable_notrace();
Steven Rostedtb1cff0a2011-05-25 14:27:43 -04003542 trace_recursion_clear(TRACE_INTERNAL_BIT);
Steven Rostedtb8489142011-05-04 09:27:52 -04003543}
3544
Steven Rostedte32d8952008-12-04 00:26:41 -05003545static void clear_ftrace_swapper(void)
3546{
3547 struct task_struct *p;
3548 int cpu;
3549
3550 get_online_cpus();
3551 for_each_online_cpu(cpu) {
3552 p = idle_task(cpu);
3553 clear_tsk_trace_trace(p);
3554 }
3555 put_online_cpus();
3556}
3557
3558static void set_ftrace_swapper(void)
3559{
3560 struct task_struct *p;
3561 int cpu;
3562
3563 get_online_cpus();
3564 for_each_online_cpu(cpu) {
3565 p = idle_task(cpu);
3566 set_tsk_trace_trace(p);
3567 }
3568 put_online_cpus();
3569}
3570
3571static void clear_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05003572{
3573 struct task_struct *p;
3574
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003575 rcu_read_lock();
Steven Rostedte32d8952008-12-04 00:26:41 -05003576 do_each_pid_task(pid, PIDTYPE_PID, p) {
Steven Rostedt978f3a42008-12-04 00:26:40 -05003577 clear_tsk_trace_trace(p);
Steven Rostedte32d8952008-12-04 00:26:41 -05003578 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003579 rcu_read_unlock();
3580
Steven Rostedte32d8952008-12-04 00:26:41 -05003581 put_pid(pid);
Steven Rostedt978f3a42008-12-04 00:26:40 -05003582}
3583
Steven Rostedte32d8952008-12-04 00:26:41 -05003584static void set_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05003585{
3586 struct task_struct *p;
3587
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003588 rcu_read_lock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05003589 do_each_pid_task(pid, PIDTYPE_PID, p) {
3590 set_tsk_trace_trace(p);
3591 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003592 rcu_read_unlock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05003593}
3594
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003595static void clear_ftrace_pid_task(struct pid *pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05003596{
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003597 if (pid == ftrace_swapper_pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05003598 clear_ftrace_swapper();
3599 else
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003600 clear_ftrace_pid(pid);
Steven Rostedte32d8952008-12-04 00:26:41 -05003601}
3602
3603static void set_ftrace_pid_task(struct pid *pid)
3604{
3605 if (pid == ftrace_swapper_pid)
3606 set_ftrace_swapper();
3607 else
3608 set_ftrace_pid(pid);
3609}
3610
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003611static int ftrace_pid_add(int p)
3612{
3613 struct pid *pid;
3614 struct ftrace_pid *fpid;
3615 int ret = -EINVAL;
3616
3617 mutex_lock(&ftrace_lock);
3618
3619 if (!p)
3620 pid = ftrace_swapper_pid;
3621 else
3622 pid = find_get_pid(p);
3623
3624 if (!pid)
3625 goto out;
3626
3627 ret = 0;
3628
3629 list_for_each_entry(fpid, &ftrace_pids, list)
3630 if (fpid->pid == pid)
3631 goto out_put;
3632
3633 ret = -ENOMEM;
3634
3635 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3636 if (!fpid)
3637 goto out_put;
3638
3639 list_add(&fpid->list, &ftrace_pids);
3640 fpid->pid = pid;
3641
3642 set_ftrace_pid_task(pid);
3643
3644 ftrace_update_pid_func();
3645 ftrace_startup_enable(0);
3646
3647 mutex_unlock(&ftrace_lock);
3648 return 0;
3649
3650out_put:
3651 if (pid != ftrace_swapper_pid)
3652 put_pid(pid);
3653
3654out:
3655 mutex_unlock(&ftrace_lock);
3656 return ret;
3657}
3658
3659static void ftrace_pid_reset(void)
3660{
3661 struct ftrace_pid *fpid, *safe;
3662
3663 mutex_lock(&ftrace_lock);
3664 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3665 struct pid *pid = fpid->pid;
3666
3667 clear_ftrace_pid_task(pid);
3668
3669 list_del(&fpid->list);
3670 kfree(fpid);
3671 }
3672
3673 ftrace_update_pid_func();
3674 ftrace_startup_enable(0);
3675
3676 mutex_unlock(&ftrace_lock);
3677}
3678
3679static void *fpid_start(struct seq_file *m, loff_t *pos)
3680{
3681 mutex_lock(&ftrace_lock);
3682
3683 if (list_empty(&ftrace_pids) && (!*pos))
3684 return (void *) 1;
3685
3686 return seq_list_start(&ftrace_pids, *pos);
3687}
3688
3689static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3690{
3691 if (v == (void *)1)
3692 return NULL;
3693
3694 return seq_list_next(v, &ftrace_pids, pos);
3695}
3696
3697static void fpid_stop(struct seq_file *m, void *p)
3698{
3699 mutex_unlock(&ftrace_lock);
3700}
3701
3702static int fpid_show(struct seq_file *m, void *v)
3703{
3704 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3705
3706 if (v == (void *)1) {
3707 seq_printf(m, "no pid\n");
3708 return 0;
3709 }
3710
3711 if (fpid->pid == ftrace_swapper_pid)
3712 seq_printf(m, "swapper tasks\n");
3713 else
3714 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3715
3716 return 0;
3717}
3718
3719static const struct seq_operations ftrace_pid_sops = {
3720 .start = fpid_start,
3721 .next = fpid_next,
3722 .stop = fpid_stop,
3723 .show = fpid_show,
3724};
3725
3726static int
3727ftrace_pid_open(struct inode *inode, struct file *file)
3728{
3729 int ret = 0;
3730
3731 if ((file->f_mode & FMODE_WRITE) &&
3732 (file->f_flags & O_TRUNC))
3733 ftrace_pid_reset();
3734
3735 if (file->f_mode & FMODE_READ)
3736 ret = seq_open(file, &ftrace_pid_sops);
3737
3738 return ret;
3739}
3740
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003741static ssize_t
3742ftrace_pid_write(struct file *filp, const char __user *ubuf,
3743 size_t cnt, loff_t *ppos)
3744{
Ingo Molnar457dc922009-11-23 11:03:28 +01003745 char buf[64], *tmp;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003746 long val;
3747 int ret;
3748
3749 if (cnt >= sizeof(buf))
3750 return -EINVAL;
3751
3752 if (copy_from_user(&buf, ubuf, cnt))
3753 return -EFAULT;
3754
3755 buf[cnt] = 0;
3756
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003757 /*
3758 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3759 * to clean the filter quietly.
3760 */
Ingo Molnar457dc922009-11-23 11:03:28 +01003761 tmp = strstrip(buf);
3762 if (strlen(tmp) == 0)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003763 return 1;
3764
Ingo Molnar457dc922009-11-23 11:03:28 +01003765 ret = strict_strtol(tmp, 10, &val);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003766 if (ret < 0)
3767 return ret;
3768
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003769 ret = ftrace_pid_add(val);
Steven Rostedt978f3a42008-12-04 00:26:40 -05003770
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003771 return ret ? ret : cnt;
3772}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003773
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003774static int
3775ftrace_pid_release(struct inode *inode, struct file *file)
3776{
3777 if (file->f_mode & FMODE_READ)
3778 seq_release(inode, file);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003779
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003780 return 0;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003781}
3782
Steven Rostedt5e2336a02009-03-05 21:44:55 -05003783static const struct file_operations ftrace_pid_fops = {
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003784 .open = ftrace_pid_open,
3785 .write = ftrace_pid_write,
3786 .read = seq_read,
3787 .llseek = seq_lseek,
3788 .release = ftrace_pid_release,
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003789};
3790
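/*
 * Example usage (illustrative) of the set_ftrace_pid file created below,
 * run from the tracing debugfs directory:
 *
 *	echo 1234 > set_ftrace_pid	# trace only PID 1234
 *	echo 5678 >> set_ftrace_pid	# additionally trace PID 5678
 *	echo 0 > set_ftrace_pid		# trace the idle (swapper) tasks
 *	echo > set_ftrace_pid		# clear the PID filter again
 *
 * Opening with O_TRUNC (plain '>') resets the list first; see
 * ftrace_pid_open() and ftrace_pid_write() above.
 */
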
3791static __init int ftrace_init_debugfs(void)
3792{
3793 struct dentry *d_tracer;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003794
3795 d_tracer = tracing_init_dentry();
3796 if (!d_tracer)
3797 return 0;
3798
3799 ftrace_init_dyn_debugfs(d_tracer);
3800
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003801 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3802 NULL, &ftrace_pid_fops);
Steven Rostedt493762f2009-03-23 17:12:36 -04003803
3804 ftrace_profile_debugfs(d_tracer);
3805
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003806 return 0;
3807}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003808fs_initcall(ftrace_init_debugfs);
3809
Steven Rostedt3d083392008-05-12 21:20:42 +02003810/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04003811 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003812 *
 3813 * This function should be used by panic code. It stops ftrace
 3814 * but in a not so nice way: nothing is cleaned up, tracing is
 3815 * simply disabled and all calls are routed back to the stub.
3816 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04003817void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003818{
3819 ftrace_disabled = 1;
3820 ftrace_enabled = 0;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003821 clear_ftrace_function();
3822}
3823
3824/**
Steven Rostedt3d083392008-05-12 21:20:42 +02003825 * register_ftrace_function - register a function for profiling
3826 * @ops - ops structure that holds the function for profiling.
3827 *
3828 * Register a function to be called by all functions in the
3829 * kernel.
3830 *
3831 * Note: @ops->func and all the functions it calls must be labeled
3832 * with "notrace", otherwise it will go into a
3833 * recursive loop.
3834 */
3835int register_ftrace_function(struct ftrace_ops *ops)
3836{
Steven Rostedt45a4a232011-04-21 23:16:46 -04003837 int ret = -1;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003838
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003839 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003840
Steven Rostedt45a4a232011-04-21 23:16:46 -04003841 if (unlikely(ftrace_disabled))
3842 goto out_unlock;
3843
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003844 ret = __register_ftrace_function(ops);
Steven Rostedtb8489142011-05-04 09:27:52 -04003845 if (!ret)
Steven Rostedta1cd6172011-05-23 15:24:25 -04003846 ret = ftrace_startup(ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04003847
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003848
Steven Rostedt45a4a232011-04-21 23:16:46 -04003849 out_unlock:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003850 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003851 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02003852}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003853EXPORT_SYMBOL_GPL(register_ftrace_function);
Steven Rostedt3d083392008-05-12 21:20:42 +02003854
3855/**
Uwe Kleine-Koenig32632922009-01-12 23:35:50 +01003856 * unregister_ftrace_function - unregister a function for profiling.
Steven Rostedt3d083392008-05-12 21:20:42 +02003857 * @ops - ops structure that holds the function to unregister
3858 *
3859 * Unregister a function that was added to be called by ftrace profiling.
3860 */
3861int unregister_ftrace_function(struct ftrace_ops *ops)
3862{
3863 int ret;
3864
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003865 mutex_lock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02003866 ret = __unregister_ftrace_function(ops);
Steven Rostedtb8489142011-05-04 09:27:52 -04003867 if (!ret)
3868 ftrace_shutdown(ops, 0);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003869 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003870
3871 return ret;
3872}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003873EXPORT_SYMBOL_GPL(unregister_ftrace_function);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003874
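/*
 * Illustrative sketch (not part of this file): a minimal user of the
 * registration API above.  The callback, ops instance and the "kfree"
 * filter are arbitrary examples; as noted in the comment above
 * register_ftrace_function(), the callback must be notrace.
 */
static void notrace example_trace_call(unsigned long ip,
				       unsigned long parent_ip)
{
	/* Runs for every traced function that matches this ops' filter. */
}

static struct ftrace_ops example_trace_ops = {
	.func	= example_trace_call,
};

static int __init example_trace_init(void)
{
	/* Restrict this callback to kfree() before registering. */
	ftrace_set_filter(&example_trace_ops, (unsigned char *)"kfree",
			  strlen("kfree"), 1);
	return register_ftrace_function(&example_trace_ops);
}

static void __exit example_trace_exit(void)
{
	unregister_ftrace_function(&example_trace_ops);
}
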
Ingo Molnare309b412008-05-12 21:20:51 +02003875int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003876ftrace_enable_sysctl(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003877 void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003878 loff_t *ppos)
3879{
Steven Rostedt45a4a232011-04-21 23:16:46 -04003880 int ret = -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003881
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003882 mutex_lock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003883
Steven Rostedt45a4a232011-04-21 23:16:46 -04003884 if (unlikely(ftrace_disabled))
3885 goto out;
3886
3887 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003888
Li Zefana32c7762009-06-26 16:55:51 +08003889 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003890 goto out;
3891
Li Zefana32c7762009-06-26 16:55:51 +08003892 last_ftrace_enabled = !!ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003893
3894 if (ftrace_enabled) {
3895
3896 ftrace_startup_sysctl();
3897
3898 /* we are starting ftrace again */
Steven Rostedtb8489142011-05-04 09:27:52 -04003899 if (ftrace_ops_list != &ftrace_list_end) {
3900 if (ftrace_ops_list->next == &ftrace_list_end)
3901 ftrace_trace_function = ftrace_ops_list->func;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003902 else
Steven Rostedtb8489142011-05-04 09:27:52 -04003903 ftrace_trace_function = ftrace_ops_list_func;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003904 }
3905
3906 } else {
3907 /* stopping ftrace calls (just send to ftrace_stub) */
3908 ftrace_trace_function = ftrace_stub;
3909
3910 ftrace_shutdown_sysctl();
3911 }
3912
3913 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003914 mutex_unlock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02003915 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02003916}
Ingo Molnarf17845e2008-10-24 12:47:10 +02003917
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003918#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003919
Steven Rostedt597af812009-04-03 15:24:12 -04003920static int ftrace_graph_active;
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08003921static struct notifier_block ftrace_suspend_notifier;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003922
Steven Rostedte49dc192008-12-02 23:50:05 -05003923int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3924{
3925 return 0;
3926}
3927
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01003928/* The callbacks that hook a function */
3929trace_func_graph_ret_t ftrace_graph_return =
3930 (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05003931trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003932
3933/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3934static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3935{
3936 int i;
3937 int ret = 0;
3938 unsigned long flags;
3939 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3940 struct task_struct *g, *t;
3941
3942 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3943 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3944 * sizeof(struct ftrace_ret_stack),
3945 GFP_KERNEL);
3946 if (!ret_stack_list[i]) {
3947 start = 0;
3948 end = i;
3949 ret = -ENOMEM;
3950 goto free;
3951 }
3952 }
3953
3954 read_lock_irqsave(&tasklist_lock, flags);
3955 do_each_thread(g, t) {
3956 if (start == end) {
3957 ret = -EAGAIN;
3958 goto unlock;
3959 }
3960
3961 if (t->ret_stack == NULL) {
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01003962 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003963 atomic_set(&t->trace_overrun, 0);
Steven Rostedt26c01622009-06-02 14:01:19 -04003964 t->curr_ret_stack = -1;
3965 /* Make sure the tasks see the -1 first: */
3966 smp_wmb();
3967 t->ret_stack = ret_stack_list[start++];
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003968 }
3969 } while_each_thread(g, t);
3970
3971unlock:
3972 read_unlock_irqrestore(&tasklist_lock, flags);
3973free:
3974 for (i = start; i < end; i++)
3975 kfree(ret_stack_list[i]);
3976 return ret;
3977}
3978
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003979static void
Steven Rostedt38516ab2010-04-20 17:04:50 -04003980ftrace_graph_probe_sched_switch(void *ignore,
3981 struct task_struct *prev, struct task_struct *next)
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003982{
3983 unsigned long long timestamp;
3984 int index;
3985
Steven Rostedtbe6f1642009-03-24 11:06:24 -04003986 /*
 3987 * Does the user want to count the time a function was asleep?
3988 * If so, do not update the time stamps.
3989 */
3990 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3991 return;
3992
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003993 timestamp = trace_clock_local();
3994
3995 prev->ftrace_timestamp = timestamp;
3996
3997 /* only process tasks that we timestamped */
3998 if (!next->ftrace_timestamp)
3999 return;
4000
4001 /*
4002 * Update all the counters in next to make up for the
4003 * time next was sleeping.
4004 */
4005 timestamp -= next->ftrace_timestamp;
4006
4007 for (index = next->curr_ret_stack; index >= 0; index--)
4008 next->ret_stack[index].calltime += timestamp;
4009}
4010
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004011/* Allocate a return stack for each task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004012static int start_graph_tracing(void)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004013{
4014 struct ftrace_ret_stack **ret_stack_list;
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004015 int ret, cpu;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004016
4017 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4018 sizeof(struct ftrace_ret_stack *),
4019 GFP_KERNEL);
4020
4021 if (!ret_stack_list)
4022 return -ENOMEM;
4023
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004024 /* The cpu_boot init_task->ret_stack will never be freed */
Steven Rostedt179c4982009-06-02 12:03:19 -04004025 for_each_online_cpu(cpu) {
4026 if (!idle_task(cpu)->ret_stack)
Steven Rostedt868baf02011-02-10 21:26:13 -05004027 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
Steven Rostedt179c4982009-06-02 12:03:19 -04004028 }
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004029
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004030 do {
4031 ret = alloc_retstack_tasklist(ret_stack_list);
4032 } while (ret == -EAGAIN);
4033
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004034 if (!ret) {
Steven Rostedt38516ab2010-04-20 17:04:50 -04004035 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004036 if (ret)
4037 pr_info("ftrace_graph: Couldn't activate tracepoint"
4038 " probe to kernel_sched_switch\n");
4039 }
4040
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004041 kfree(ret_stack_list);
4042 return ret;
4043}
4044
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004045/*
4046 * Hibernation protection.
 4047 * The state of the current task is too unstable during
4048 * suspend/restore to disk. We want to protect against that.
4049 */
4050static int
4051ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4052 void *unused)
4053{
4054 switch (state) {
4055 case PM_HIBERNATION_PREPARE:
4056 pause_graph_tracing();
4057 break;
4058
4059 case PM_POST_HIBERNATION:
4060 unpause_graph_tracing();
4061 break;
4062 }
4063 return NOTIFY_DONE;
4064}
4065
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004066int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4067 trace_func_graph_ent_t entryfunc)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004068{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004069 int ret = 0;
4070
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004071 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004072
Steven Rostedt05ce5812009-03-24 00:18:31 -04004073 /* we currently allow only one tracer registered at a time */
Steven Rostedt597af812009-04-03 15:24:12 -04004074 if (ftrace_graph_active) {
Steven Rostedt05ce5812009-03-24 00:18:31 -04004075 ret = -EBUSY;
4076 goto out;
4077 }
4078
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004079 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4080 register_pm_notifier(&ftrace_suspend_notifier);
4081
Steven Rostedt597af812009-04-03 15:24:12 -04004082 ftrace_graph_active++;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004083 ret = start_graph_tracing();
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004084 if (ret) {
Steven Rostedt597af812009-04-03 15:24:12 -04004085 ftrace_graph_active--;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004086 goto out;
4087 }
Steven Rostedte53a6312008-11-26 00:16:25 -05004088
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004089 ftrace_graph_return = retfunc;
4090 ftrace_graph_entry = entryfunc;
Steven Rostedte53a6312008-11-26 00:16:25 -05004091
Steven Rostedta1cd6172011-05-23 15:24:25 -04004092 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004093
4094out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004095 mutex_unlock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004096 return ret;
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004097}
4098
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004099void unregister_ftrace_graph(void)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004100{
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004101 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004102
Steven Rostedt597af812009-04-03 15:24:12 -04004103 if (unlikely(!ftrace_graph_active))
Steven Rostedt2aad1b72009-03-30 11:11:28 -04004104 goto out;
4105
Steven Rostedt597af812009-04-03 15:24:12 -04004106 ftrace_graph_active--;
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004107 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05004108 ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedtbd69c302011-05-03 21:55:54 -04004109 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004110 unregister_pm_notifier(&ftrace_suspend_notifier);
Steven Rostedt38516ab2010-04-20 17:04:50 -04004111 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004112
Steven Rostedt2aad1b72009-03-30 11:11:28 -04004113 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004114 mutex_unlock(&ftrace_lock);
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004115}
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004116
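/*
 * Illustrative sketch (not part of this file): hooking the graph tracer
 * callbacks through the registration functions above.  The callback
 * names and bodies are arbitrary; returning non-zero from the entry
 * handler asks the tracer to record this function and hook its return.
 */
static int notrace example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* trace this function */
}

static void notrace example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->calltime and trace->rettime bracket the traced call. */
}

static int __init example_graph_init(void)
{
	return register_ftrace_graph(example_graph_return,
				     example_graph_entry);
}

static void __exit example_graph_exit(void)
{
	unregister_ftrace_graph();
}
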
Steven Rostedt868baf02011-02-10 21:26:13 -05004117static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4118
4119static void
4120graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4121{
4122 atomic_set(&t->tracing_graph_pause, 0);
4123 atomic_set(&t->trace_overrun, 0);
4124 t->ftrace_timestamp = 0;
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004125 /* make curr_ret_stack visible before we add the ret_stack */
Steven Rostedt868baf02011-02-10 21:26:13 -05004126 smp_wmb();
4127 t->ret_stack = ret_stack;
4128}
4129
4130/*
4131 * Allocate a return stack for the idle task. May be the first
 4132 * time through, or it may be called again when a CPU is brought online.
4133 */
4134void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4135{
4136 t->curr_ret_stack = -1;
4137 /*
4138 * The idle task has no parent, it either has its own
4139 * stack or no stack at all.
4140 */
4141 if (t->ret_stack)
4142 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4143
4144 if (ftrace_graph_active) {
4145 struct ftrace_ret_stack *ret_stack;
4146
4147 ret_stack = per_cpu(idle_ret_stack, cpu);
4148 if (!ret_stack) {
4149 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4150 * sizeof(struct ftrace_ret_stack),
4151 GFP_KERNEL);
4152 if (!ret_stack)
4153 return;
4154 per_cpu(idle_ret_stack, cpu) = ret_stack;
4155 }
4156 graph_init_task(t, ret_stack);
4157 }
4158}
4159
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004160/* Allocate a return stack for newly created task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004161void ftrace_graph_init_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004162{
Steven Rostedt84047e32009-06-02 16:51:55 -04004163 /* Make sure we do not use the parent ret_stack */
4164 t->ret_stack = NULL;
Steven Rostedtea14eb72010-03-12 19:41:23 -05004165 t->curr_ret_stack = -1;
Steven Rostedt84047e32009-06-02 16:51:55 -04004166
Steven Rostedt597af812009-04-03 15:24:12 -04004167 if (ftrace_graph_active) {
Steven Rostedt82310a32009-06-02 12:26:07 -04004168 struct ftrace_ret_stack *ret_stack;
4169
4170 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004171 * sizeof(struct ftrace_ret_stack),
4172 GFP_KERNEL);
Steven Rostedt82310a32009-06-02 12:26:07 -04004173 if (!ret_stack)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004174 return;
Steven Rostedt868baf02011-02-10 21:26:13 -05004175 graph_init_task(t, ret_stack);
Steven Rostedt84047e32009-06-02 16:51:55 -04004176 }
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004177}
4178
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004179void ftrace_graph_exit_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004180{
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01004181 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4182
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004183 t->ret_stack = NULL;
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01004184 /* NULL must become visible to IRQs before we free it: */
4185 barrier();
4186
4187 kfree(ret_stack);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004188}
Steven Rostedt14a866c2008-12-02 23:50:02 -05004189
4190void ftrace_graph_stop(void)
4191{
4192 ftrace_stop();
4193}
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004194#endif