/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

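/* On a detected anomaly, warn and shut all of ftrace down via ftrace_kill() */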
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func		= ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg\n"
		   "  --------                               "
		   "---    ----            ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		   "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static DEFINE_MUTEX(mutex);
	static struct trace_seq s;
	unsigned long long avg;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	mutex_lock(&mutex);
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_print_seq(m, &s);
	mutex_unlock(&mutex);
#endif
	seq_putc(m, '\n');

	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

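/*
 * Allocate the pages that hold this ftrace_profile_stat's profile
 * records (one ftrace_profile_stat per CPU).  On failure, everything
 * allocated here is freed again.
 */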
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	free_page((unsigned long)stat->pages);
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

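/*
 * Profiler callback: count one hit for @ip on this CPU.  Without
 * CONFIG_FUNCTION_GRAPH_TRACER this is registered directly as the
 * ftrace_ops callback; with the graph tracer it is called from
 * profile_graph_entry() below.
 */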
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

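/*
 * Graph return callback: add the function's run time to its profile
 * record.  When the graph-time trace option is off, the time spent in
 * the function's children is subtracted out first.
 */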
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			kfree(stat);
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

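/*
 * Illustrative use of the iteration macros above (this mirrors the
 * pattern in ftrace_replace_code() further down in this file):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 */
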
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

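/*
 * Record a newly discovered mcount call site.  The record is queued on
 * the ftrace_new_addrs list until ftrace_update_code() converts it.
 */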
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}


static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long flag = 0UL;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced or we want to disable it,
	 * then disable it.
	 *
	 * If we want to enable it and filtering is off, then enable it.
	 *
	 * If we want to enable it and filtering is on, enable it only if
	 * it's filtered
	 */
	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return 0;

	if (flag) {
		rec->flags |= FTRACE_FL_ENABLED;
		return ftrace_make_call(rec, ftrace_addr);
	}

	rec->flags &= ~FTRACE_FL_ENABLED;
	return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and not converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

Steven Rostedt5a45cfe2008-11-26 00:16:24 -05001176static void ftrace_startup(int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001177{
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001178 if (unlikely(ftrace_disabled))
1179 return;
1180
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001181 ftrace_start_up++;
Steven Rostedt982c3502008-11-15 16:31:41 -05001182 command |= FTRACE_ENABLE_CALLS;
Steven Rostedt3d083392008-05-12 21:20:42 +02001183
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001184 ftrace_startup_enable(command);
Steven Rostedt3d083392008-05-12 21:20:42 +02001185}
1186
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05001187static void ftrace_shutdown(int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001188{
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001189 if (unlikely(ftrace_disabled))
1190 return;
1191
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001192 ftrace_start_up--;
Frederic Weisbecker9ea1a152009-06-20 06:52:21 +02001193 /*
1194 * Just warn in case of unbalance, no need to kill ftrace, it's not
1195 * critical but the ftrace_call callers may be never nopped again after
1196 * further ftrace uses.
1197 */
1198 WARN_ON_ONCE(ftrace_start_up < 0);
1199
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001200 if (!ftrace_start_up)
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001201 command |= FTRACE_DISABLE_CALLS;
1202
1203 if (saved_ftrace_func != ftrace_trace_function) {
1204 saved_ftrace_func = ftrace_trace_function;
1205 command |= FTRACE_UPDATE_TRACE_FUNC;
1206 }
1207
1208 if (!command || !ftrace_enabled)
Steven Rostedte6ea44e2009-02-14 01:42:44 -05001209 return;
Steven Rostedt3d083392008-05-12 21:20:42 +02001210
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001211 ftrace_run_update_code(command);
Steven Rostedt3d083392008-05-12 21:20:42 +02001212}
1213
Ingo Molnare309b412008-05-12 21:20:51 +02001214static void ftrace_startup_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001215{
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001216 int command = FTRACE_ENABLE_MCOUNT;
1217
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001218 if (unlikely(ftrace_disabled))
1219 return;
1220
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001221 /* Force update next time */
1222 saved_ftrace_func = NULL;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001223 /* ftrace_start_up is true if we want ftrace running */
1224 if (ftrace_start_up)
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001225 command |= FTRACE_ENABLE_CALLS;
1226
1227 ftrace_run_update_code(command);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001228}
1229
Ingo Molnare309b412008-05-12 21:20:51 +02001230static void ftrace_shutdown_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001231{
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001232 int command = FTRACE_DISABLE_MCOUNT;
1233
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001234 if (unlikely(ftrace_disabled))
1235 return;
1236
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001237 /* ftrace_start_up is true if ftrace is running */
1238 if (ftrace_start_up)
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001239 command |= FTRACE_DISABLE_CALLS;
1240
1241 ftrace_run_update_code(command);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001242}
1243
Steven Rostedt3d083392008-05-12 21:20:42 +02001244static cycle_t ftrace_update_time;
1245static unsigned long ftrace_update_cnt;
1246unsigned long ftrace_update_tot_cnt;
1247
Steven Rostedt31e88902008-11-14 16:21:19 -08001248static int ftrace_update_code(struct module *mod)
Steven Rostedt3d083392008-05-12 21:20:42 +02001249{
Lai Jiangshane94142a2009-03-13 17:51:27 +08001250 struct dyn_ftrace *p;
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05301251 cycle_t start, stop;
Steven Rostedt3d083392008-05-12 21:20:42 +02001252
Ingo Molnar750ed1a2008-05-12 21:20:46 +02001253 start = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02001254 ftrace_update_cnt = 0;
1255
Lai Jiangshane94142a2009-03-13 17:51:27 +08001256 while (ftrace_new_addrs) {
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05301257
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001258 /* If something went wrong, bail without enabling anything */
1259 if (unlikely(ftrace_disabled))
1260 return -1;
Steven Rostedt3d083392008-05-12 21:20:42 +02001261
Lai Jiangshane94142a2009-03-13 17:51:27 +08001262 p = ftrace_new_addrs;
Lai Jiangshanee000b72009-03-24 13:38:06 +08001263 ftrace_new_addrs = p->newlist;
Lai Jiangshane94142a2009-03-13 17:51:27 +08001264 p->flags = 0L;
Abhishek Sagar0eb96702008-06-01 21:47:30 +05301265
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001266 /* convert record (i.e, patch mcount-call with NOP) */
Steven Rostedt31e88902008-11-14 16:21:19 -08001267 if (ftrace_code_disable(mod, p)) {
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001268 p->flags |= FTRACE_FL_CONVERTED;
1269 ftrace_update_cnt++;
1270 } else
1271 ftrace_free_rec(p);
Steven Rostedt3d083392008-05-12 21:20:42 +02001272 }
1273
Ingo Molnar750ed1a2008-05-12 21:20:46 +02001274 stop = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02001275 ftrace_update_time = stop - start;
1276 ftrace_update_tot_cnt += ftrace_update_cnt;
1277
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001278 return 0;
1279}
1280
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001281static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001282{
1283 struct ftrace_page *pg;
1284 int cnt;
1285 int i;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001286
1287 /* allocate a few pages */
1288 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1289 if (!ftrace_pages_start)
1290 return -1;
1291
1292 /*
1293 * Allocate a few more pages.
1294 *
1295 * TODO: have some parser search vmlinux before
1296 * final linking to find all calls to ftrace.
1297 * Then we can:
1298 * a) know how many pages to allocate.
1299 * and/or
1300 * b) set up the table then.
1301 *
1302 * The dynamic code is still necessary for
1303 * modules.
1304 */
1305
1306 pg = ftrace_pages = ftrace_pages_start;
1307
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001308 cnt = num_to_init / ENTRIES_PER_PAGE;
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001309 pr_info("ftrace: allocating %ld entries in %d pages\n",
walimis5821e1b2008-11-15 15:19:06 +08001310 num_to_init, cnt + 1);
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001311
1312 for (i = 0; i < cnt; i++) {
1313 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1314
1315 /* If we fail, we'll try later anyway */
1316 if (!pg->next)
1317 break;
1318
1319 pg = pg->next;
1320 }
1321
1322 return 0;
1323}
1324
Steven Rostedt5072c592008-05-12 21:20:43 +02001325enum {
1326 FTRACE_ITER_FILTER = (1 << 0),
1327 FTRACE_ITER_CONT = (1 << 1),
Steven Rostedt41c52c02008-05-22 11:46:33 -04001328 FTRACE_ITER_NOTRACE = (1 << 2),
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301329 FTRACE_ITER_FAILURES = (1 << 3),
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001330 FTRACE_ITER_PRINTALL = (1 << 4),
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001331 FTRACE_ITER_HASH = (1 << 5),
Steven Rostedt5072c592008-05-12 21:20:43 +02001332};
1333
1334#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1335
1336struct ftrace_iterator {
Steven Rostedt5072c592008-05-12 21:20:43 +02001337 struct ftrace_page *pg;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001338 int hidx;
Steven Rostedt431aa3f2009-01-06 12:43:01 -05001339 int idx;
Steven Rostedt5072c592008-05-12 21:20:43 +02001340 unsigned flags;
1341 unsigned char buffer[FTRACE_BUFF_MAX+1];
1342 unsigned buffer_idx;
1343 unsigned filtered;
1344};
1345
Ingo Molnare309b412008-05-12 21:20:51 +02001346static void *
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001347t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1348{
1349 struct ftrace_iterator *iter = m->private;
1350 struct hlist_node *hnd = v;
1351 struct hlist_head *hhd;
1352
1353 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1354
1355 (*pos)++;
1356
1357 retry:
1358 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1359 return NULL;
1360
1361 hhd = &ftrace_func_hash[iter->hidx];
1362
1363 if (hlist_empty(hhd)) {
1364 iter->hidx++;
1365 hnd = NULL;
1366 goto retry;
1367 }
1368
1369 if (!hnd)
1370 hnd = hhd->first;
1371 else {
1372 hnd = hnd->next;
1373 if (!hnd) {
1374 iter->hidx++;
1375 goto retry;
1376 }
1377 }
1378
1379 return hnd;
1380}
1381
1382static void *t_hash_start(struct seq_file *m, loff_t *pos)
1383{
1384 struct ftrace_iterator *iter = m->private;
1385 void *p = NULL;
Li Zefand82d6242009-06-24 09:54:54 +08001386 loff_t l;
1387
1388 if (!(iter->flags & FTRACE_ITER_HASH))
1389 *pos = 0;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001390
1391 iter->flags |= FTRACE_ITER_HASH;
1392
Li Zefand82d6242009-06-24 09:54:54 +08001393 iter->hidx = 0;
1394 for (l = 0; l <= *pos; ) {
1395 p = t_hash_next(m, p, &l);
1396 if (!p)
1397 break;
1398 }
1399 return p;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001400}
1401
1402static int t_hash_show(struct seq_file *m, void *v)
1403{
Steven Rostedtb6887d72009-02-17 12:32:04 -05001404 struct ftrace_func_probe *rec;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001405 struct hlist_node *hnd = v;
1406 char str[KSYM_SYMBOL_LEN];
1407
Steven Rostedtb6887d72009-02-17 12:32:04 -05001408 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001409
Steven Rostedt809dcf22009-02-16 23:06:01 -05001410 if (rec->ops->print)
1411 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1412
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001413 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1414 seq_printf(m, "%s:", str);
1415
1416 kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
1417 seq_printf(m, "%s", str);
1418
1419 if (rec->data)
1420 seq_printf(m, ":%p", rec->data);
1421 seq_putc(m, '\n');
1422
1423 return 0;
1424}
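
/*
 * Output sketch (values invented for illustration): for a probe on
 * schedule() whose ops does not provide a ->print handler, the line
 * emitted above reads roughly
 *
 *	schedule:my_probe_func:ffff88003c1e9a40
 *
 * i.e. "<traced function>:<probe callback>[:<data pointer>]"; the
 * data part is omitted when the probe was registered without data.
 */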
1425
1426static void *
Steven Rostedt5072c592008-05-12 21:20:43 +02001427t_next(struct seq_file *m, void *v, loff_t *pos)
1428{
1429 struct ftrace_iterator *iter = m->private;
1430 struct dyn_ftrace *rec = NULL;
1431
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001432 if (iter->flags & FTRACE_ITER_HASH)
1433 return t_hash_next(m, v, pos);
1434
Steven Rostedt5072c592008-05-12 21:20:43 +02001435 (*pos)++;
1436
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001437 if (iter->flags & FTRACE_ITER_PRINTALL)
1438 return NULL;
1439
Steven Rostedt5072c592008-05-12 21:20:43 +02001440 retry:
1441 if (iter->idx >= iter->pg->index) {
1442 if (iter->pg->next) {
1443 iter->pg = iter->pg->next;
1444 iter->idx = 0;
1445 goto retry;
1446 }
1447 } else {
1448 rec = &iter->pg->records[iter->idx++];
Steven Rostedta9fdda32008-08-14 22:47:17 -04001449 if ((rec->flags & FTRACE_FL_FREE) ||
1450
1451 (!(iter->flags & FTRACE_ITER_FAILURES) &&
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301452 (rec->flags & FTRACE_FL_FAILED)) ||
1453
1454 ((iter->flags & FTRACE_ITER_FAILURES) &&
Steven Rostedta9fdda32008-08-14 22:47:17 -04001455 !(rec->flags & FTRACE_FL_FAILED)) ||
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301456
Steven Rostedt0183fb12008-11-07 22:36:02 -05001457 ((iter->flags & FTRACE_ITER_FILTER) &&
1458 !(rec->flags & FTRACE_FL_FILTER)) ||
1459
Steven Rostedt41c52c02008-05-22 11:46:33 -04001460 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1461 !(rec->flags & FTRACE_FL_NOTRACE))) {
Steven Rostedt5072c592008-05-12 21:20:43 +02001462 rec = NULL;
1463 goto retry;
1464 }
1465 }
1466
Steven Rostedt5072c592008-05-12 21:20:43 +02001467 return rec;
1468}
1469
1470static void *t_start(struct seq_file *m, loff_t *pos)
1471{
1472 struct ftrace_iterator *iter = m->private;
1473 void *p = NULL;
Li Zefan694ce0a2009-06-24 09:54:19 +08001474 loff_t l;
Steven Rostedt5072c592008-05-12 21:20:43 +02001475
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001476 mutex_lock(&ftrace_lock);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001477 /*
1478 * For set_ftrace_filter reading, if we have the filter
1479 * off, we can short cut and just print out that all
1480 * functions are enabled.
1481 */
1482 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1483 if (*pos > 0)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001484 return t_hash_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001485 iter->flags |= FTRACE_ITER_PRINTALL;
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001486 return iter;
1487 }
1488
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001489 if (iter->flags & FTRACE_ITER_HASH)
1490 return t_hash_start(m, pos);
1491
Li Zefan694ce0a2009-06-24 09:54:19 +08001492 iter->pg = ftrace_pages_start;
1493 iter->idx = 0;
1494 for (l = 0; l <= *pos; ) {
1495 p = t_next(m, p, &l);
1496 if (!p)
1497 break;
Liming Wang50cdaf02008-11-28 12:13:21 +08001498 }
walimis5821e1b2008-11-15 15:19:06 +08001499
Li Zefan694ce0a2009-06-24 09:54:19 +08001500 if (!p && iter->flags & FTRACE_ITER_FILTER)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001501 return t_hash_start(m, pos);
1502
Steven Rostedt5072c592008-05-12 21:20:43 +02001503 return p;
1504}
1505
1506static void t_stop(struct seq_file *m, void *p)
1507{
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001508 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001509}
1510
1511static int t_show(struct seq_file *m, void *v)
1512{
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001513 struct ftrace_iterator *iter = m->private;
Steven Rostedt5072c592008-05-12 21:20:43 +02001514 struct dyn_ftrace *rec = v;
1515 char str[KSYM_SYMBOL_LEN];
1516
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001517 if (iter->flags & FTRACE_ITER_HASH)
1518 return t_hash_show(m, v);
1519
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05001520 if (iter->flags & FTRACE_ITER_PRINTALL) {
1521 seq_printf(m, "#### all functions enabled ####\n");
1522 return 0;
1523 }
1524
Steven Rostedt5072c592008-05-12 21:20:43 +02001525 if (!rec)
1526 return 0;
1527
1528 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1529
Liming Wang50cdaf02008-11-28 12:13:21 +08001530 seq_printf(m, "%s\n", str);
Steven Rostedt5072c592008-05-12 21:20:43 +02001531
1532 return 0;
1533}
1534
1535static struct seq_operations show_ftrace_seq_ops = {
1536 .start = t_start,
1537 .next = t_next,
1538 .stop = t_stop,
1539 .show = t_show,
1540};
1541
Ingo Molnare309b412008-05-12 21:20:51 +02001542static int
Steven Rostedt5072c592008-05-12 21:20:43 +02001543ftrace_avail_open(struct inode *inode, struct file *file)
1544{
1545 struct ftrace_iterator *iter;
1546 int ret;
1547
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001548 if (unlikely(ftrace_disabled))
1549 return -ENODEV;
1550
Steven Rostedt5072c592008-05-12 21:20:43 +02001551 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1552 if (!iter)
1553 return -ENOMEM;
1554
1555 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +02001556
1557 ret = seq_open(file, &show_ftrace_seq_ops);
1558 if (!ret) {
1559 struct seq_file *m = file->private_data;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02001560
Steven Rostedt5072c592008-05-12 21:20:43 +02001561 m->private = iter;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02001562 } else {
Steven Rostedt5072c592008-05-12 21:20:43 +02001563 kfree(iter);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02001564 }
Steven Rostedt5072c592008-05-12 21:20:43 +02001565
1566 return ret;
1567}
1568
1569int ftrace_avail_release(struct inode *inode, struct file *file)
1570{
1571 struct seq_file *m = (struct seq_file *)file->private_data;
1572 struct ftrace_iterator *iter = m->private;
1573
1574 seq_release(inode, file);
1575 kfree(iter);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02001576
Steven Rostedt5072c592008-05-12 21:20:43 +02001577 return 0;
1578}
1579
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301580static int
1581ftrace_failures_open(struct inode *inode, struct file *file)
1582{
1583 int ret;
1584 struct seq_file *m;
1585 struct ftrace_iterator *iter;
1586
1587 ret = ftrace_avail_open(inode, file);
1588 if (!ret) {
1589 m = (struct seq_file *)file->private_data;
1590 iter = (struct ftrace_iterator *)m->private;
1591 iter->flags = FTRACE_ITER_FAILURES;
1592 }
1593
1594 return ret;
1595}
1596
1597
Steven Rostedt41c52c02008-05-22 11:46:33 -04001598static void ftrace_filter_reset(int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02001599{
1600 struct ftrace_page *pg;
1601 struct dyn_ftrace *rec;
Steven Rostedt41c52c02008-05-22 11:46:33 -04001602 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
Steven Rostedt5072c592008-05-12 21:20:43 +02001603
Steven Rostedt52baf112009-02-14 01:15:39 -05001604 mutex_lock(&ftrace_lock);
Steven Rostedt41c52c02008-05-22 11:46:33 -04001605 if (enable)
1606 ftrace_filtered = 0;
Steven Rostedt265c8312009-02-13 12:43:56 -05001607 do_for_each_ftrace_rec(pg, rec) {
1608 if (rec->flags & FTRACE_FL_FAILED)
1609 continue;
1610 rec->flags &= ~type;
1611 } while_for_each_ftrace_rec();
Steven Rostedt52baf112009-02-14 01:15:39 -05001612 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001613}
1614
Ingo Molnare309b412008-05-12 21:20:51 +02001615static int
Steven Rostedt41c52c02008-05-22 11:46:33 -04001616ftrace_regex_open(struct inode *inode, struct file *file, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02001617{
1618 struct ftrace_iterator *iter;
1619 int ret = 0;
1620
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001621 if (unlikely(ftrace_disabled))
1622 return -ENODEV;
1623
Steven Rostedt5072c592008-05-12 21:20:43 +02001624 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1625 if (!iter)
1626 return -ENOMEM;
1627
Steven Rostedt41c52c02008-05-22 11:46:33 -04001628 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001629 if ((file->f_mode & FMODE_WRITE) &&
1630 !(file->f_flags & O_APPEND))
Steven Rostedt41c52c02008-05-22 11:46:33 -04001631 ftrace_filter_reset(enable);
Steven Rostedt5072c592008-05-12 21:20:43 +02001632
1633 if (file->f_mode & FMODE_READ) {
1634 iter->pg = ftrace_pages_start;
Steven Rostedt41c52c02008-05-22 11:46:33 -04001635 iter->flags = enable ? FTRACE_ITER_FILTER :
1636 FTRACE_ITER_NOTRACE;
Steven Rostedt5072c592008-05-12 21:20:43 +02001637
1638 ret = seq_open(file, &show_ftrace_seq_ops);
1639 if (!ret) {
1640 struct seq_file *m = file->private_data;
1641 m->private = iter;
1642 } else
1643 kfree(iter);
1644 } else
1645 file->private_data = iter;
Steven Rostedt41c52c02008-05-22 11:46:33 -04001646 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001647
1648 return ret;
1649}
1650
Steven Rostedt41c52c02008-05-22 11:46:33 -04001651static int
1652ftrace_filter_open(struct inode *inode, struct file *file)
1653{
1654 return ftrace_regex_open(inode, file, 1);
1655}
1656
1657static int
1658ftrace_notrace_open(struct inode *inode, struct file *file)
1659{
1660 return ftrace_regex_open(inode, file, 0);
1661}
1662
Ingo Molnare309b412008-05-12 21:20:51 +02001663static loff_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04001664ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
Steven Rostedt5072c592008-05-12 21:20:43 +02001665{
1666 loff_t ret;
1667
1668 if (file->f_mode & FMODE_READ)
1669 ret = seq_lseek(file, offset, origin);
1670 else
1671 file->f_pos = ret = 1;
1672
1673 return ret;
1674}
1675
1676enum {
1677 MATCH_FULL,
1678 MATCH_FRONT_ONLY,
1679 MATCH_MIDDLE_ONLY,
1680 MATCH_END_ONLY,
1681};
1682
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001683/*
1684 * (static function - no need for kernel doc)
1685 *
1686 * Pass in a buffer containing a glob and this function will
1687 * set search to point to the search part of the buffer and
1688 * return the type of search it is (see enum above).
1689 * This does modify buff.
1690 *
1691 * Returns enum type.
1692 * search returns the pointer to use for comparison.
1693 * not returns 1 if buff started with a '!'
1694 * 0 otherwise.
1695 */
1696static int
Steven Rostedt64e7c442009-02-13 17:08:48 -05001697ftrace_setup_glob(char *buff, int len, char **search, int *not)
Steven Rostedt5072c592008-05-12 21:20:43 +02001698{
Steven Rostedt5072c592008-05-12 21:20:43 +02001699 int type = MATCH_FULL;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001700 int i;
Steven Rostedtea3a6d62008-12-17 15:05:36 -05001701
1702 if (buff[0] == '!') {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001703 *not = 1;
Steven Rostedtea3a6d62008-12-17 15:05:36 -05001704 buff++;
1705 len--;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001706 } else
1707 *not = 0;
1708
1709 *search = buff;
Steven Rostedt5072c592008-05-12 21:20:43 +02001710
1711 for (i = 0; i < len; i++) {
1712 if (buff[i] == '*') {
1713 if (!i) {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001714 *search = buff + 1;
Steven Rostedt5072c592008-05-12 21:20:43 +02001715 type = MATCH_END_ONLY;
Steven Rostedt5072c592008-05-12 21:20:43 +02001716 } else {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001717 if (type == MATCH_END_ONLY)
Steven Rostedt5072c592008-05-12 21:20:43 +02001718 type = MATCH_MIDDLE_ONLY;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001719 else
Steven Rostedt5072c592008-05-12 21:20:43 +02001720 type = MATCH_FRONT_ONLY;
Steven Rostedt5072c592008-05-12 21:20:43 +02001721 buff[i] = 0;
1722 break;
1723 }
1724 }
1725 }
1726
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001727 return type;
1728}
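
/*
 * Illustrative sketch (not part of the original file): how the glob
 * parser above reports its results.  The local variables are made up
 * for this example; only ftrace_setup_glob() itself is real.
 *
 *	char buf[] = "!sched_*";
 *	char *search;
 *	int not;
 *	int type = ftrace_setup_glob(buf, strlen(buf), &search, &not);
 *
 * Afterwards not == 1, search points to "sched_" (the trailing '*'
 * has been replaced with a nul byte) and type == MATCH_FRONT_ONLY,
 * so ftrace_match() below will compare with strncmp().
 */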
1729
Steven Rostedt64e7c442009-02-13 17:08:48 -05001730static int ftrace_match(char *str, char *regex, int len, int type)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001731{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001732 int matched = 0;
1733 char *ptr;
1734
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001735 switch (type) {
1736 case MATCH_FULL:
1737 if (strcmp(str, regex) == 0)
1738 matched = 1;
1739 break;
1740 case MATCH_FRONT_ONLY:
1741 if (strncmp(str, regex, len) == 0)
1742 matched = 1;
1743 break;
1744 case MATCH_MIDDLE_ONLY:
1745 if (strstr(str, regex))
1746 matched = 1;
1747 break;
1748 case MATCH_END_ONLY:
1749 ptr = strstr(str, regex);
1750 if (ptr && (ptr[len] == 0))
1751 matched = 1;
1752 break;
1753 }
1754
1755 return matched;
1756}
1757
Steven Rostedt64e7c442009-02-13 17:08:48 -05001758static int
1759ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1760{
1761 char str[KSYM_SYMBOL_LEN];
1762
1763 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1764 return ftrace_match(str, regex, len, type);
1765}
1766
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001767static void ftrace_match_records(char *buff, int len, int enable)
1768{
Steven Rostedt6a24a242009-02-17 11:20:26 -05001769 unsigned int search_len;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001770 struct ftrace_page *pg;
1771 struct dyn_ftrace *rec;
Steven Rostedt6a24a242009-02-17 11:20:26 -05001772 unsigned long flag;
1773 char *search;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001774 int type;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001775 int not;
1776
Steven Rostedt6a24a242009-02-17 11:20:26 -05001777 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001778 type = ftrace_setup_glob(buff, len, &search, &not);
1779
1780 search_len = strlen(search);
1781
Steven Rostedt52baf112009-02-14 01:15:39 -05001782 mutex_lock(&ftrace_lock);
Steven Rostedt265c8312009-02-13 12:43:56 -05001783 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt5072c592008-05-12 21:20:43 +02001784
Steven Rostedt265c8312009-02-13 12:43:56 -05001785 if (rec->flags & FTRACE_FL_FAILED)
1786 continue;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05001787
1788 if (ftrace_match_record(rec, search, search_len, type)) {
Steven Rostedt265c8312009-02-13 12:43:56 -05001789 if (not)
1790 rec->flags &= ~flag;
1791 else
1792 rec->flags |= flag;
1793 }
Steven Rostedte68746a2009-02-13 20:53:42 -05001794 /*
1795 * Only enable filtering if we have a function that
1796 * is filtered on.
1797 */
1798 if (enable && (rec->flags & FTRACE_FL_FILTER))
1799 ftrace_filtered = 1;
Steven Rostedt265c8312009-02-13 12:43:56 -05001800 } while_for_each_ftrace_rec();
Steven Rostedt52baf112009-02-14 01:15:39 -05001801 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001802}
1803
Steven Rostedt64e7c442009-02-13 17:08:48 -05001804static int
1805ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1806 char *regex, int len, int type)
1807{
1808 char str[KSYM_SYMBOL_LEN];
1809 char *modname;
1810
1811 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1812
1813 if (!modname || strcmp(modname, mod))
1814 return 0;
1815
1816 /* blank search means to match all funcs in the mod */
1817 if (len)
1818 return ftrace_match(str, regex, len, type);
1819 else
1820 return 1;
1821}
1822
1823static void ftrace_match_module_records(char *buff, char *mod, int enable)
1824{
Steven Rostedt6a24a242009-02-17 11:20:26 -05001825 unsigned search_len = 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05001826 struct ftrace_page *pg;
1827 struct dyn_ftrace *rec;
1828 int type = MATCH_FULL;
Steven Rostedt6a24a242009-02-17 11:20:26 -05001829 char *search = buff;
1830 unsigned long flag;
Steven Rostedt64e7c442009-02-13 17:08:48 -05001831 int not = 0;
1832
Steven Rostedt6a24a242009-02-17 11:20:26 -05001833 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1834
Steven Rostedt64e7c442009-02-13 17:08:48 -05001835 /* blank or '*' mean the same */
1836 if (strcmp(buff, "*") == 0)
1837 buff[0] = 0;
1838
1839	/* handle the case of 'don't filter this module' */
1840 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1841 buff[0] = 0;
1842 not = 1;
1843 }
1844
1845 if (strlen(buff)) {
1846 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1847 search_len = strlen(search);
1848 }
1849
Steven Rostedt52baf112009-02-14 01:15:39 -05001850 mutex_lock(&ftrace_lock);
Steven Rostedt64e7c442009-02-13 17:08:48 -05001851 do_for_each_ftrace_rec(pg, rec) {
1852
1853 if (rec->flags & FTRACE_FL_FAILED)
1854 continue;
1855
1856 if (ftrace_match_module_record(rec, mod,
1857 search, search_len, type)) {
1858 if (not)
1859 rec->flags &= ~flag;
1860 else
1861 rec->flags |= flag;
1862 }
Steven Rostedte68746a2009-02-13 20:53:42 -05001863 if (enable && (rec->flags & FTRACE_FL_FILTER))
1864 ftrace_filtered = 1;
Steven Rostedt64e7c442009-02-13 17:08:48 -05001865
1866 } while_for_each_ftrace_rec();
Steven Rostedt52baf112009-02-14 01:15:39 -05001867 mutex_unlock(&ftrace_lock);
Steven Rostedt64e7c442009-02-13 17:08:48 -05001868}
1869
Steven Rostedtf6180772009-02-14 00:40:25 -05001870/*
1871 * We register the module command as a template to show others how
1872 * to register a command as well.
1873 */
1874
1875static int
1876ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1877{
1878 char *mod;
1879
1880 /*
1881 * cmd == 'mod' because we only registered this func
1882 * for the 'mod' ftrace_func_command.
1883 * But if you register one func with multiple commands,
1884 * you can tell which command was used by the cmd
1885 * parameter.
1886 */
1887
1888 /* we must have a module name */
1889 if (!param)
1890 return -EINVAL;
1891
1892 mod = strsep(&param, ":");
1893 if (!strlen(mod))
1894 return -EINVAL;
1895
1896 ftrace_match_module_records(func, mod, enable);
1897 return 0;
1898}
1899
1900static struct ftrace_func_command ftrace_mod_cmd = {
1901 .name = "mod",
1902 .func = ftrace_mod_callback,
1903};
1904
1905static int __init ftrace_mod_cmd_init(void)
1906{
1907 return register_ftrace_command(&ftrace_mod_cmd);
1908}
1909device_initcall(ftrace_mod_cmd_init);
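
/*
 * Hedged sketch of how another command could be wired up, following
 * the 'mod' template above.  The command name "foo" and foo_callback()
 * are hypothetical and exist only for this illustration.
 *
 *	static int foo_callback(char *func, char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command foo_cmd = {
 *		.name	= "foo",
 *		.func	= foo_callback,
 *	};
 *
 *	register_ftrace_command(&foo_cmd);
 *
 * After that, writing "some_func:foo:args" to set_ftrace_filter is
 * routed to foo_callback() by ftrace_process_regex() below.
 */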
1910
Steven Rostedt59df055f2009-02-14 15:29:06 -05001911static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05001912function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
Steven Rostedt59df055f2009-02-14 15:29:06 -05001913{
Steven Rostedtb6887d72009-02-17 12:32:04 -05001914 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05001915 struct hlist_head *hhd;
1916 struct hlist_node *n;
1917 unsigned long key;
1918 int resched;
1919
1920 key = hash_long(ip, FTRACE_HASH_BITS);
1921
1922 hhd = &ftrace_func_hash[key];
1923
1924 if (hlist_empty(hhd))
1925 return;
1926
1927 /*
1928	 * Disable preemption for these calls to prevent an RCU grace
1929 * period. This syncs the hash iteration and freeing of items
1930 * on the hash. rcu_read_lock is too dangerous here.
1931 */
1932 resched = ftrace_preempt_disable();
1933 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1934 if (entry->ip == ip)
1935 entry->ops->func(ip, parent_ip, &entry->data);
1936 }
1937 ftrace_preempt_enable(resched);
1938}
1939
Steven Rostedtb6887d72009-02-17 12:32:04 -05001940static struct ftrace_ops trace_probe_ops __read_mostly =
Steven Rostedt59df055f2009-02-14 15:29:06 -05001941{
Steven Rostedtfb9fb012009-03-25 13:26:41 -04001942 .func = function_trace_probe_call,
Steven Rostedt59df055f2009-02-14 15:29:06 -05001943};
1944
Steven Rostedtb6887d72009-02-17 12:32:04 -05001945static int ftrace_probe_registered;
Steven Rostedt59df055f2009-02-14 15:29:06 -05001946
Steven Rostedtb6887d72009-02-17 12:32:04 -05001947static void __enable_ftrace_function_probe(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05001948{
1949 int i;
1950
Steven Rostedtb6887d72009-02-17 12:32:04 -05001951 if (ftrace_probe_registered)
Steven Rostedt59df055f2009-02-14 15:29:06 -05001952 return;
1953
1954 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1955 struct hlist_head *hhd = &ftrace_func_hash[i];
1956 if (hhd->first)
1957 break;
1958 }
1959 /* Nothing registered? */
1960 if (i == FTRACE_FUNC_HASHSIZE)
1961 return;
1962
Steven Rostedtb6887d72009-02-17 12:32:04 -05001963 __register_ftrace_function(&trace_probe_ops);
Steven Rostedt59df055f2009-02-14 15:29:06 -05001964 ftrace_startup(0);
Steven Rostedtb6887d72009-02-17 12:32:04 -05001965 ftrace_probe_registered = 1;
Steven Rostedt59df055f2009-02-14 15:29:06 -05001966}
1967
Steven Rostedtb6887d72009-02-17 12:32:04 -05001968static void __disable_ftrace_function_probe(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05001969{
1970 int i;
1971
Steven Rostedtb6887d72009-02-17 12:32:04 -05001972 if (!ftrace_probe_registered)
Steven Rostedt59df055f2009-02-14 15:29:06 -05001973 return;
1974
1975 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1976 struct hlist_head *hhd = &ftrace_func_hash[i];
1977 if (hhd->first)
1978 return;
1979 }
1980
1981 /* no more funcs left */
Steven Rostedtb6887d72009-02-17 12:32:04 -05001982 __unregister_ftrace_function(&trace_probe_ops);
Steven Rostedt59df055f2009-02-14 15:29:06 -05001983 ftrace_shutdown(0);
Steven Rostedtb6887d72009-02-17 12:32:04 -05001984 ftrace_probe_registered = 0;
Steven Rostedt59df055f2009-02-14 15:29:06 -05001985}
1986
1987
1988static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1989{
Steven Rostedtb6887d72009-02-17 12:32:04 -05001990 struct ftrace_func_probe *entry =
1991 container_of(rhp, struct ftrace_func_probe, rcu);
Steven Rostedt59df055f2009-02-14 15:29:06 -05001992
1993 if (entry->ops->free)
1994 entry->ops->free(&entry->data);
1995 kfree(entry);
1996}
1997
1998
1999int
Steven Rostedtb6887d72009-02-17 12:32:04 -05002000register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002001 void *data)
2002{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002003 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002004 struct ftrace_page *pg;
2005 struct dyn_ftrace *rec;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002006 int type, len, not;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002007 unsigned long key;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002008 int count = 0;
2009 char *search;
2010
2011 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2012 len = strlen(search);
2013
Steven Rostedtb6887d72009-02-17 12:32:04 -05002014 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002015 if (WARN_ON(not))
2016 return -EINVAL;
2017
2018 mutex_lock(&ftrace_lock);
2019 do_for_each_ftrace_rec(pg, rec) {
2020
2021 if (rec->flags & FTRACE_FL_FAILED)
2022 continue;
2023
2024 if (!ftrace_match_record(rec, search, len, type))
2025 continue;
2026
2027 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2028 if (!entry) {
Steven Rostedtb6887d72009-02-17 12:32:04 -05002029 /* If we did not process any, then return error */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002030 if (!count)
2031 count = -ENOMEM;
2032 goto out_unlock;
2033 }
2034
2035 count++;
2036
2037 entry->data = data;
2038
2039 /*
2040 * The caller might want to do something special
2041 * for each function we find. We call the callback
2042 * to give the caller an opportunity to do so.
2043 */
2044 if (ops->callback) {
2045 if (ops->callback(rec->ip, &entry->data) < 0) {
2046 /* caller does not like this func */
2047 kfree(entry);
2048 continue;
2049 }
2050 }
2051
2052 entry->ops = ops;
2053 entry->ip = rec->ip;
2054
2055 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2056 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2057
2058 } while_for_each_ftrace_rec();
Steven Rostedtb6887d72009-02-17 12:32:04 -05002059 __enable_ftrace_function_probe();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002060
2061 out_unlock:
2062 mutex_unlock(&ftrace_lock);
2063
2064 return count;
2065}
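
/*
 * Hedged usage sketch (not from the original file): attaching a probe
 * to every function matching a glob.  The names my_probe_func and
 * my_probe_ops are invented for this illustration.
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	int matched = register_ftrace_function_probe("sched_*",
 *						      &my_probe_ops, NULL);
 *
 * On success 'matched' is the number of functions the glob matched;
 * the probe is removed again with unregister_ftrace_function_probe().
 */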
2066
2067enum {
Steven Rostedtb6887d72009-02-17 12:32:04 -05002068 PROBE_TEST_FUNC = 1,
2069 PROBE_TEST_DATA = 2
Steven Rostedt59df055f2009-02-14 15:29:06 -05002070};
2071
2072static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002073__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002074 void *data, int flags)
2075{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002076 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002077 struct hlist_node *n, *tmp;
2078 char str[KSYM_SYMBOL_LEN];
2079 int type = MATCH_FULL;
2080 int i, len = 0;
2081 char *search;
2082
2083	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2084 glob = NULL;
2085 else {
2086 int not;
2087
2088 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2089 len = strlen(search);
2090
Steven Rostedtb6887d72009-02-17 12:32:04 -05002091 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002092 if (WARN_ON(not))
2093 return;
2094 }
2095
2096 mutex_lock(&ftrace_lock);
2097 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2098 struct hlist_head *hhd = &ftrace_func_hash[i];
2099
2100 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2101
2102 /* break up if statements for readability */
Steven Rostedtb6887d72009-02-17 12:32:04 -05002103 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002104 continue;
2105
Steven Rostedtb6887d72009-02-17 12:32:04 -05002106 if ((flags & PROBE_TEST_DATA) && entry->data != data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002107 continue;
2108
2109 /* do this last, since it is the most expensive */
2110 if (glob) {
2111 kallsyms_lookup(entry->ip, NULL, NULL,
2112 NULL, str);
2113 if (!ftrace_match(str, glob, len, type))
2114 continue;
2115 }
2116
2117 hlist_del(&entry->node);
2118 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2119 }
2120 }
Steven Rostedtb6887d72009-02-17 12:32:04 -05002121 __disable_ftrace_function_probe();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002122 mutex_unlock(&ftrace_lock);
2123}
2124
2125void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002126unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002127 void *data)
2128{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002129 __unregister_ftrace_function_probe(glob, ops, data,
2130 PROBE_TEST_FUNC | PROBE_TEST_DATA);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002131}
2132
2133void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002135{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002136 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002137}
2138
Steven Rostedtb6887d72009-02-17 12:32:04 -05002139void unregister_ftrace_function_probe_all(char *glob)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002140{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002141 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002142}
2143
Steven Rostedtf6180772009-02-14 00:40:25 -05002144static LIST_HEAD(ftrace_commands);
2145static DEFINE_MUTEX(ftrace_cmd_mutex);
2146
2147int register_ftrace_command(struct ftrace_func_command *cmd)
2148{
2149 struct ftrace_func_command *p;
2150 int ret = 0;
2151
2152 mutex_lock(&ftrace_cmd_mutex);
2153 list_for_each_entry(p, &ftrace_commands, list) {
2154 if (strcmp(cmd->name, p->name) == 0) {
2155 ret = -EBUSY;
2156 goto out_unlock;
2157 }
2158 }
2159 list_add(&cmd->list, &ftrace_commands);
2160 out_unlock:
2161 mutex_unlock(&ftrace_cmd_mutex);
2162
2163 return ret;
2164}
2165
2166int unregister_ftrace_command(struct ftrace_func_command *cmd)
2167{
2168 struct ftrace_func_command *p, *n;
2169 int ret = -ENODEV;
2170
2171 mutex_lock(&ftrace_cmd_mutex);
2172 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2173 if (strcmp(cmd->name, p->name) == 0) {
2174 ret = 0;
2175 list_del_init(&p->list);
2176 goto out_unlock;
2177 }
2178 }
2179 out_unlock:
2180 mutex_unlock(&ftrace_cmd_mutex);
2181
2182 return ret;
2183}
2184
Steven Rostedt64e7c442009-02-13 17:08:48 -05002185static int ftrace_process_regex(char *buff, int len, int enable)
2186{
Steven Rostedtf6180772009-02-14 00:40:25 -05002187 char *func, *command, *next = buff;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002188 struct ftrace_func_command *p;
Steven Rostedtf6180772009-02-14 00:40:25 -05002189 int ret = -EINVAL;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002190
2191 func = strsep(&next, ":");
2192
2193 if (!next) {
2194 ftrace_match_records(func, len, enable);
2195 return 0;
2196 }
2197
Steven Rostedtf6180772009-02-14 00:40:25 -05002198 /* command found */
Steven Rostedt64e7c442009-02-13 17:08:48 -05002199
2200 command = strsep(&next, ":");
2201
Steven Rostedtf6180772009-02-14 00:40:25 -05002202 mutex_lock(&ftrace_cmd_mutex);
2203 list_for_each_entry(p, &ftrace_commands, list) {
2204 if (strcmp(p->name, command) == 0) {
2205 ret = p->func(func, command, next, enable);
2206 goto out_unlock;
2207 }
Steven Rostedt64e7c442009-02-13 17:08:48 -05002208 }
Steven Rostedtf6180772009-02-14 00:40:25 -05002209 out_unlock:
2210 mutex_unlock(&ftrace_cmd_mutex);
Steven Rostedt64e7c442009-02-13 17:08:48 -05002211
Steven Rostedtf6180772009-02-14 00:40:25 -05002212 return ret;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002213}
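
/*
 * Parsing sketch (illustration only): a buffer of "write*:mod:ext3"
 * written to set_ftrace_filter is split by the code above into
 *
 *	func    = "write*"
 *	command = "mod"
 *	next    = "ext3"	(handed to the command callback as param)
 *
 * and dispatched to ftrace_mod_callback(), which limits the glob to
 * functions of the named module.  A plain "write*" with no ':' goes
 * straight to ftrace_match_records().
 */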
2214
Ingo Molnare309b412008-05-12 21:20:51 +02002215static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04002216ftrace_regex_write(struct file *file, const char __user *ubuf,
2217 size_t cnt, loff_t *ppos, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02002218{
2219 struct ftrace_iterator *iter;
2220 char ch;
2221 size_t read = 0;
2222 ssize_t ret;
2223
2224 if (!cnt || cnt < 0)
2225 return 0;
2226
Steven Rostedt41c52c02008-05-22 11:46:33 -04002227 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002228
2229 if (file->f_mode & FMODE_READ) {
2230 struct seq_file *m = file->private_data;
2231 iter = m->private;
2232 } else
2233 iter = file->private_data;
2234
2235 if (!*ppos) {
2236 iter->flags &= ~FTRACE_ITER_CONT;
2237 iter->buffer_idx = 0;
2238 }
2239
2240 ret = get_user(ch, ubuf++);
2241 if (ret)
2242 goto out;
2243 read++;
2244 cnt--;
2245
2246 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
2247 /* skip white space */
2248 while (cnt && isspace(ch)) {
2249 ret = get_user(ch, ubuf++);
2250 if (ret)
2251 goto out;
2252 read++;
2253 cnt--;
2254 }
2255
Steven Rostedt5072c592008-05-12 21:20:43 +02002256 if (isspace(ch)) {
2257 file->f_pos += read;
2258 ret = read;
2259 goto out;
2260 }
2261
2262 iter->buffer_idx = 0;
2263 }
2264
2265 while (cnt && !isspace(ch)) {
2266 if (iter->buffer_idx < FTRACE_BUFF_MAX)
2267 iter->buffer[iter->buffer_idx++] = ch;
2268 else {
2269 ret = -EINVAL;
2270 goto out;
2271 }
2272 ret = get_user(ch, ubuf++);
2273 if (ret)
2274 goto out;
2275 read++;
2276 cnt--;
2277 }
2278
2279 if (isspace(ch)) {
2280 iter->filtered++;
2281 iter->buffer[iter->buffer_idx] = 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002282 ret = ftrace_process_regex(iter->buffer,
2283 iter->buffer_idx, enable);
2284 if (ret)
2285 goto out;
Steven Rostedt5072c592008-05-12 21:20:43 +02002286 iter->buffer_idx = 0;
2287 } else
2288 iter->flags |= FTRACE_ITER_CONT;
2289
2290
2291 file->f_pos += read;
2292
2293 ret = read;
2294 out:
Steven Rostedt41c52c02008-05-22 11:46:33 -04002295 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002296
2297 return ret;
2298}
2299
Steven Rostedt41c52c02008-05-22 11:46:33 -04002300static ssize_t
2301ftrace_filter_write(struct file *file, const char __user *ubuf,
2302 size_t cnt, loff_t *ppos)
2303{
2304 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2305}
2306
2307static ssize_t
2308ftrace_notrace_write(struct file *file, const char __user *ubuf,
2309 size_t cnt, loff_t *ppos)
2310{
2311 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2312}
2313
2314static void
2315ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2316{
2317 if (unlikely(ftrace_disabled))
2318 return;
2319
2320 mutex_lock(&ftrace_regex_lock);
2321 if (reset)
2322 ftrace_filter_reset(enable);
2323 if (buf)
Steven Rostedt7f24b312009-02-13 14:37:33 -05002324 ftrace_match_records(buf, len, enable);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002325 mutex_unlock(&ftrace_regex_lock);
2326}
2327
Steven Rostedt77a2b372008-05-12 21:20:45 +02002328/**
2329 * ftrace_set_filter - set a function to filter on in ftrace
2330 * @buf - the string that holds the function filter text.
2331 * @len - the length of the string.
2332 * @reset - non zero to reset all filters before applying this filter.
2333 *
2334 * Filters denote which functions should be enabled when tracing is enabled.
2335 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2336 */
Ingo Molnare309b412008-05-12 21:20:51 +02002337void ftrace_set_filter(unsigned char *buf, int len, int reset)
Steven Rostedt77a2b372008-05-12 21:20:45 +02002338{
Steven Rostedt41c52c02008-05-22 11:46:33 -04002339 ftrace_set_regex(buf, len, reset, 1);
2340}
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002341
Steven Rostedt41c52c02008-05-22 11:46:33 -04002342/**
2343 * ftrace_set_notrace - set a function to not trace in ftrace
2344 * @buf - the string that holds the function notrace text.
2345 * @len - the length of the string.
2346 * @reset - non zero to reset all filters before applying this filter.
2347 *
2348 * Notrace Filters denote which functions should not be enabled when tracing
2349 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2350 * for tracing.
2351 */
2352void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2353{
2354 ftrace_set_regex(buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02002355}
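
/*
 * In-kernel usage sketch (the caller here is hypothetical): trace only
 * the wakeup-related scheduler functions, clearing any earlier filter:
 *
 *	ftrace_set_filter("sched_wakeup*", strlen("sched_wakeup*"), 1);
 *
 * ftrace_set_notrace() takes the same arguments but populates the
 * notrace list instead.
 */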
2356
Steven Rostedt2af15d62009-05-28 13:37:24 -04002357/*
2358 * command line interface to allow users to set filters on boot up.
2359 */
2360#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2361static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2362static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2363
2364static int __init set_ftrace_notrace(char *str)
2365{
2366 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2367 return 1;
2368}
2369__setup("ftrace_notrace=", set_ftrace_notrace);
2370
2371static int __init set_ftrace_filter(char *str)
2372{
2373 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2374 return 1;
2375}
2376__setup("ftrace_filter=", set_ftrace_filter);
2377
2378static void __init set_ftrace_early_filter(char *buf, int enable)
2379{
2380 char *func;
2381
2382 while (buf) {
2383 func = strsep(&buf, ",");
2384 ftrace_set_regex(func, strlen(func), 0, enable);
2385 }
2386}
2387
2388static void __init set_ftrace_early_filters(void)
2389{
2390 if (ftrace_filter_buf[0])
2391 set_ftrace_early_filter(ftrace_filter_buf, 1);
2392 if (ftrace_notrace_buf[0])
2393 set_ftrace_early_filter(ftrace_notrace_buf, 0);
2394}
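
/*
 * Boot-time usage sketch: the __setup() handlers above take a
 * comma-separated list on the kernel command line, e.g.
 *
 *	ftrace_filter=sched_*,schedule
 *	ftrace_notrace=rcu_*
 *
 * Each entry (globs work just as with set_ftrace_filter) is applied
 * via ftrace_set_regex() from ftrace_init(), before userspace runs.
 * The function names above are only examples.
 */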
2395
Ingo Molnare309b412008-05-12 21:20:51 +02002396static int
Steven Rostedt41c52c02008-05-22 11:46:33 -04002397ftrace_regex_release(struct inode *inode, struct file *file, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02002398{
2399 struct seq_file *m = (struct seq_file *)file->private_data;
2400 struct ftrace_iterator *iter;
2401
Steven Rostedt41c52c02008-05-22 11:46:33 -04002402 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002403 if (file->f_mode & FMODE_READ) {
2404 iter = m->private;
2405
2406 seq_release(inode, file);
2407 } else
2408 iter = file->private_data;
2409
2410 if (iter->buffer_idx) {
2411 iter->filtered++;
2412 iter->buffer[iter->buffer_idx] = 0;
Steven Rostedt7f24b312009-02-13 14:37:33 -05002413 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
Steven Rostedt5072c592008-05-12 21:20:43 +02002414 }
2415
Steven Rostedte6ea44e2009-02-14 01:42:44 -05002416 mutex_lock(&ftrace_lock);
Steven Rostedtee02a2e2008-11-15 16:31:41 -05002417 if (ftrace_start_up && ftrace_enabled)
Steven Rostedt5072c592008-05-12 21:20:43 +02002418 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05002419 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002420
2421 kfree(iter);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002422 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002423 return 0;
2424}
2425
Steven Rostedt41c52c02008-05-22 11:46:33 -04002426static int
2427ftrace_filter_release(struct inode *inode, struct file *file)
2428{
2429 return ftrace_regex_release(inode, file, 1);
2430}
2431
2432static int
2433ftrace_notrace_release(struct inode *inode, struct file *file)
2434{
2435 return ftrace_regex_release(inode, file, 0);
2436}
2437
Steven Rostedt5e2336a2009-03-05 21:44:55 -05002438static const struct file_operations ftrace_avail_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02002439 .open = ftrace_avail_open,
2440 .read = seq_read,
2441 .llseek = seq_lseek,
2442 .release = ftrace_avail_release,
2443};
2444
Steven Rostedt5e2336a2009-03-05 21:44:55 -05002445static const struct file_operations ftrace_failures_fops = {
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05302446 .open = ftrace_failures_open,
2447 .read = seq_read,
2448 .llseek = seq_lseek,
2449 .release = ftrace_avail_release,
2450};
2451
Steven Rostedt5e2336a2009-03-05 21:44:55 -05002452static const struct file_operations ftrace_filter_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02002453 .open = ftrace_filter_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08002454 .read = seq_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02002455 .write = ftrace_filter_write,
Steven Rostedt41c52c02008-05-22 11:46:33 -04002456 .llseek = ftrace_regex_lseek,
Steven Rostedt5072c592008-05-12 21:20:43 +02002457 .release = ftrace_filter_release,
2458};
2459
Steven Rostedt5e2336a2009-03-05 21:44:55 -05002460static const struct file_operations ftrace_notrace_fops = {
Steven Rostedt41c52c02008-05-22 11:46:33 -04002461 .open = ftrace_notrace_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08002462 .read = seq_read,
Steven Rostedt41c52c02008-05-22 11:46:33 -04002463 .write = ftrace_notrace_write,
2464 .llseek = ftrace_regex_lseek,
2465 .release = ftrace_notrace_release,
2466};
2467
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002468#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2469
2470static DEFINE_MUTEX(graph_lock);
2471
2472int ftrace_graph_count;
2473unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2474
2475static void *
Li Zefan85951842009-06-24 09:54:00 +08002476__g_next(struct seq_file *m, loff_t *pos)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002477{
2478 unsigned long *array = m->private;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002479
Li Zefan85951842009-06-24 09:54:00 +08002480 if (*pos >= ftrace_graph_count)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002481 return NULL;
Li Zefan85951842009-06-24 09:54:00 +08002482 return &array[*pos];
2483}
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002484
Li Zefan85951842009-06-24 09:54:00 +08002485static void *
2486g_next(struct seq_file *m, void *v, loff_t *pos)
2487{
2488 (*pos)++;
2489 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002490}
2491
2492static void *g_start(struct seq_file *m, loff_t *pos)
2493{
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002494 mutex_lock(&graph_lock);
2495
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002496 /* Nothing, tell g_show to print all functions are enabled */
2497 if (!ftrace_graph_count && !*pos)
2498 return (void *)1;
2499
Li Zefan85951842009-06-24 09:54:00 +08002500 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002501}
2502
2503static void g_stop(struct seq_file *m, void *p)
2504{
2505 mutex_unlock(&graph_lock);
2506}
2507
2508static int g_show(struct seq_file *m, void *v)
2509{
2510 unsigned long *ptr = v;
2511 char str[KSYM_SYMBOL_LEN];
2512
2513 if (!ptr)
2514 return 0;
2515
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002516 if (ptr == (unsigned long *)1) {
2517 seq_printf(m, "#### all functions enabled ####\n");
2518 return 0;
2519 }
2520
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002521 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
2522
2523 seq_printf(m, "%s\n", str);
2524
2525 return 0;
2526}
2527
2528static struct seq_operations ftrace_graph_seq_ops = {
2529 .start = g_start,
2530 .next = g_next,
2531 .stop = g_stop,
2532 .show = g_show,
2533};
2534
2535static int
2536ftrace_graph_open(struct inode *inode, struct file *file)
2537{
2538 int ret = 0;
2539
2540 if (unlikely(ftrace_disabled))
2541 return -ENODEV;
2542
2543 mutex_lock(&graph_lock);
2544 if ((file->f_mode & FMODE_WRITE) &&
2545 !(file->f_flags & O_APPEND)) {
2546 ftrace_graph_count = 0;
2547 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2548 }
2549
2550 if (file->f_mode & FMODE_READ) {
2551 ret = seq_open(file, &ftrace_graph_seq_ops);
2552 if (!ret) {
2553 struct seq_file *m = file->private_data;
2554 m->private = ftrace_graph_funcs;
2555 }
2556 } else
2557 file->private_data = ftrace_graph_funcs;
2558 mutex_unlock(&graph_lock);
2559
2560 return ret;
2561}
2562
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002563static int
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002564ftrace_set_func(unsigned long *array, int *idx, char *buffer)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002565{
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002566 struct dyn_ftrace *rec;
2567 struct ftrace_page *pg;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002568 int search_len;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002569 int found = 0;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002570 int type, not;
2571 char *search;
2572 bool exists;
2573 int i;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002574
2575 if (ftrace_disabled)
2576 return -ENODEV;
2577
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002578 /* decode regex */
2579 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2580 if (not)
2581 return -EINVAL;
2582
2583 search_len = strlen(search);
2584
Steven Rostedt52baf112009-02-14 01:15:39 -05002585 mutex_lock(&ftrace_lock);
Steven Rostedt265c8312009-02-13 12:43:56 -05002586 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002587
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002588 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2589 break;
2590
Steven Rostedt265c8312009-02-13 12:43:56 -05002591 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2592 continue;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002593
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002594 if (ftrace_match_record(rec, search, search_len, type)) {
2595 /* ensure it is not already in the array */
2596 exists = false;
2597 for (i = 0; i < *idx; i++)
2598 if (array[i] == rec->ip) {
2599 exists = true;
Steven Rostedt265c8312009-02-13 12:43:56 -05002600 break;
2601 }
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002602 if (!exists) {
2603 array[(*idx)++] = rec->ip;
2604 found = 1;
2605 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002606 }
Steven Rostedt265c8312009-02-13 12:43:56 -05002607 } while_for_each_ftrace_rec();
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002608
Steven Rostedt52baf112009-02-14 01:15:39 -05002609 mutex_unlock(&ftrace_lock);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002610
2611 return found ? 0 : -EINVAL;
2612}
2613
2614static ssize_t
2615ftrace_graph_write(struct file *file, const char __user *ubuf,
2616 size_t cnt, loff_t *ppos)
2617{
2618 unsigned char buffer[FTRACE_BUFF_MAX+1];
2619 unsigned long *array;
2620 size_t read = 0;
2621 ssize_t ret;
2622 int index = 0;
2623 char ch;
2624
2625 if (!cnt || cnt < 0)
2626 return 0;
2627
2628 mutex_lock(&graph_lock);
2629
2630 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2631 ret = -EBUSY;
2632 goto out;
2633 }
2634
2635 if (file->f_mode & FMODE_READ) {
2636 struct seq_file *m = file->private_data;
2637 array = m->private;
2638 } else
2639 array = file->private_data;
2640
2641 ret = get_user(ch, ubuf++);
2642 if (ret)
2643 goto out;
2644 read++;
2645 cnt--;
2646
2647 /* skip white space */
2648 while (cnt && isspace(ch)) {
2649 ret = get_user(ch, ubuf++);
2650 if (ret)
2651 goto out;
2652 read++;
2653 cnt--;
2654 }
2655
2656 if (isspace(ch)) {
2657 *ppos += read;
2658 ret = read;
2659 goto out;
2660 }
2661
2662 while (cnt && !isspace(ch)) {
2663 if (index < FTRACE_BUFF_MAX)
2664 buffer[index++] = ch;
2665 else {
2666 ret = -EINVAL;
2667 goto out;
2668 }
2669 ret = get_user(ch, ubuf++);
2670 if (ret)
2671 goto out;
2672 read++;
2673 cnt--;
2674 }
2675 buffer[index] = 0;
2676
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01002677 /* we allow only one expression at a time */
2678 ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002679 if (ret)
2680 goto out;
2681
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002682 file->f_pos += read;
2683
2684 ret = read;
2685 out:
2686 mutex_unlock(&graph_lock);
2687
2688 return ret;
2689}
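
/*
 * Userspace usage sketch: limiting the function graph tracer to a few
 * functions (one expression per write, as noted above):
 *
 *	echo sys_open   > set_graph_function
 *	echo 'sched_*' >> set_graph_function
 *
 * Opening for write without O_APPEND clears the list first; globs are
 * resolved to addresses immediately by ftrace_set_func(), so only
 * functions known at write time are added.
 */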
2690
2691static const struct file_operations ftrace_graph_fops = {
2692 .open = ftrace_graph_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08002693 .read = seq_read,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002694 .write = ftrace_graph_write,
2695};
2696#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2697
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002698static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02002699{
Steven Rostedt5072c592008-05-12 21:20:43 +02002700
Frederic Weisbecker5452af62009-03-27 00:25:38 +01002701 trace_create_file("available_filter_functions", 0444,
2702 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02002703
Frederic Weisbecker5452af62009-03-27 00:25:38 +01002704 trace_create_file("failures", 0444,
2705 d_tracer, NULL, &ftrace_failures_fops);
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05302706
Frederic Weisbecker5452af62009-03-27 00:25:38 +01002707 trace_create_file("set_ftrace_filter", 0644, d_tracer,
2708 NULL, &ftrace_filter_fops);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002709
Frederic Weisbecker5452af62009-03-27 00:25:38 +01002710 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
Steven Rostedt41c52c02008-05-22 11:46:33 -04002711 NULL, &ftrace_notrace_fops);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04002712
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002713#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbecker5452af62009-03-27 00:25:38 +01002714 trace_create_file("set_graph_function", 0444, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002715 NULL,
2716 &ftrace_graph_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05002717#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2718
Steven Rostedt5072c592008-05-12 21:20:43 +02002719 return 0;
2720}
2721
Steven Rostedt31e88902008-11-14 16:21:19 -08002722static int ftrace_convert_nops(struct module *mod,
2723 unsigned long *start,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002724 unsigned long *end)
2725{
2726 unsigned long *p;
2727 unsigned long addr;
2728 unsigned long flags;
2729
Steven Rostedte6ea44e2009-02-14 01:42:44 -05002730 mutex_lock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002731 p = start;
2732 while (p < end) {
2733 addr = ftrace_call_adjust(*p++);
Steven Rostedt20e52272008-11-14 16:21:19 -08002734 /*
2735 * Some architecture linkers will pad between
2736 * the different mcount_loc sections of different
2737 * object files to satisfy alignments.
2738 * Skip any NULL pointers.
2739 */
2740 if (!addr)
2741 continue;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002742 ftrace_record_ip(addr);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002743 }
2744
Steven Rostedt08f5ac902008-10-23 09:33:07 -04002745 /* disable interrupts to prevent kstop machine */
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002746 local_irq_save(flags);
Steven Rostedt31e88902008-11-14 16:21:19 -08002747 ftrace_update_code(mod);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002748 local_irq_restore(flags);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05002749 mutex_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002750
2751 return 0;
2752}
2753
Steven Rostedt93eb6772009-04-15 13:24:06 -04002754#ifdef CONFIG_MODULES
2755void ftrace_release(void *start, void *end)
2756{
2757 struct dyn_ftrace *rec;
2758 struct ftrace_page *pg;
2759 unsigned long s = (unsigned long)start;
2760 unsigned long e = (unsigned long)end;
2761
2762 if (ftrace_disabled || !start || start == end)
2763 return;
2764
2765 mutex_lock(&ftrace_lock);
2766 do_for_each_ftrace_rec(pg, rec) {
2767 if ((rec->ip >= s) && (rec->ip < e)) {
2768 /*
2769 * rec->ip is changed in ftrace_free_rec()
2770			 * It should not be between s and e if the record was freed.
2771 */
2772 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2773 ftrace_free_rec(rec);
2774 }
2775 } while_for_each_ftrace_rec();
2776 mutex_unlock(&ftrace_lock);
2777}
2778
2779static void ftrace_init_module(struct module *mod,
2780 unsigned long *start, unsigned long *end)
Steven Rostedt90d595f2008-08-14 15:45:09 -04002781{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04002782 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04002783 return;
Steven Rostedt31e88902008-11-14 16:21:19 -08002784 ftrace_convert_nops(mod, start, end);
Steven Rostedt90d595f2008-08-14 15:45:09 -04002785}
2786
Steven Rostedt93eb6772009-04-15 13:24:06 -04002787static int ftrace_module_notify(struct notifier_block *self,
2788 unsigned long val, void *data)
2789{
2790 struct module *mod = data;
2791
2792 switch (val) {
2793 case MODULE_STATE_COMING:
2794 ftrace_init_module(mod, mod->ftrace_callsites,
2795 mod->ftrace_callsites +
2796 mod->num_ftrace_callsites);
2797 break;
2798 case MODULE_STATE_GOING:
2799 ftrace_release(mod->ftrace_callsites,
2800 mod->ftrace_callsites +
2801 mod->num_ftrace_callsites);
2802 break;
2803 }
2804
2805 return 0;
2806}
2807#else
2808static int ftrace_module_notify(struct notifier_block *self,
2809 unsigned long val, void *data)
2810{
2811 return 0;
2812}
2813#endif /* CONFIG_MODULES */
2814
2815struct notifier_block ftrace_module_nb = {
2816 .notifier_call = ftrace_module_notify,
2817 .priority = 0,
2818};
2819
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002820extern unsigned long __start_mcount_loc[];
2821extern unsigned long __stop_mcount_loc[];
2822
2823void __init ftrace_init(void)
2824{
2825 unsigned long count, addr, flags;
2826 int ret;
2827
2828 /* Keep the ftrace pointer to the stub */
2829 addr = (unsigned long)ftrace_stub;
2830
2831 local_irq_save(flags);
2832 ftrace_dyn_arch_init(&addr);
2833 local_irq_restore(flags);
2834
2835 /* ftrace_dyn_arch_init places the return code in addr */
2836 if (addr)
2837 goto failed;
2838
2839 count = __stop_mcount_loc - __start_mcount_loc;
2840
2841 ret = ftrace_dyn_table_alloc(count);
2842 if (ret)
2843 goto failed;
2844
2845 last_ftrace_enabled = ftrace_enabled = 1;
2846
Steven Rostedt31e88902008-11-14 16:21:19 -08002847 ret = ftrace_convert_nops(NULL,
2848 __start_mcount_loc,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002849 __stop_mcount_loc);
2850
Steven Rostedt93eb6772009-04-15 13:24:06 -04002851 ret = register_module_notifier(&ftrace_module_nb);
Ming Lei24ed0c42009-05-17 15:31:38 +08002852 if (ret)
Steven Rostedt93eb6772009-04-15 13:24:06 -04002853 pr_warning("Failed to register trace ftrace module notifier\n");
2854
Steven Rostedt2af15d62009-05-28 13:37:24 -04002855 set_ftrace_early_filters();
2856
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002857 return;
2858 failed:
2859 ftrace_disabled = 1;
2860}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04002861
Steven Rostedt3d083392008-05-12 21:20:42 +02002862#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01002863
2864static int __init ftrace_nodyn_init(void)
2865{
2866 ftrace_enabled = 1;
2867 return 0;
2868}
2869device_initcall(ftrace_nodyn_init);
2870
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002871static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2872static inline void ftrace_startup_enable(int command) { }
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05002873/* Keep as macros so we do not need to define the commands */
2874# define ftrace_startup(command) do { } while (0)
2875# define ftrace_shutdown(command) do { } while (0)
Ingo Molnarc7aafc52008-05-12 21:20:45 +02002876# define ftrace_startup_sysctl() do { } while (0)
2877# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedt3d083392008-05-12 21:20:42 +02002878#endif /* CONFIG_DYNAMIC_FTRACE */
2879
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002880static ssize_t
2881ftrace_pid_read(struct file *file, char __user *ubuf,
2882 size_t cnt, loff_t *ppos)
2883{
2884 char buf[64];
2885 int r;
2886
Steven Rostedte32d8952008-12-04 00:26:41 -05002887 if (ftrace_pid_trace == ftrace_swapper_pid)
2888 r = sprintf(buf, "swapper tasks\n");
2889 else if (ftrace_pid_trace)
Lai Jiangshancc59c9e2009-03-24 11:03:01 +08002890 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002891 else
2892 r = sprintf(buf, "no pid\n");
2893
2894 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2895}
2896
Steven Rostedte32d8952008-12-04 00:26:41 -05002897static void clear_ftrace_swapper(void)
2898{
2899 struct task_struct *p;
2900 int cpu;
2901
2902 get_online_cpus();
2903 for_each_online_cpu(cpu) {
2904 p = idle_task(cpu);
2905 clear_tsk_trace_trace(p);
2906 }
2907 put_online_cpus();
2908}
2909
2910static void set_ftrace_swapper(void)
2911{
2912 struct task_struct *p;
2913 int cpu;
2914
2915 get_online_cpus();
2916 for_each_online_cpu(cpu) {
2917 p = idle_task(cpu);
2918 set_tsk_trace_trace(p);
2919 }
2920 put_online_cpus();
2921}
2922
2923static void clear_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05002924{
2925 struct task_struct *p;
2926
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01002927 rcu_read_lock();
Steven Rostedte32d8952008-12-04 00:26:41 -05002928 do_each_pid_task(pid, PIDTYPE_PID, p) {
Steven Rostedt978f3a42008-12-04 00:26:40 -05002929 clear_tsk_trace_trace(p);
Steven Rostedte32d8952008-12-04 00:26:41 -05002930 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01002931 rcu_read_unlock();
2932
Steven Rostedte32d8952008-12-04 00:26:41 -05002933 put_pid(pid);
Steven Rostedt978f3a42008-12-04 00:26:40 -05002934}
2935
Steven Rostedte32d8952008-12-04 00:26:41 -05002936static void set_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05002937{
2938 struct task_struct *p;
2939
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01002940 rcu_read_lock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05002941 do_each_pid_task(pid, PIDTYPE_PID, p) {
2942 set_tsk_trace_trace(p);
2943 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01002944 rcu_read_unlock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05002945}
2946
Steven Rostedte32d8952008-12-04 00:26:41 -05002947static void clear_ftrace_pid_task(struct pid **pid)
2948{
2949 if (*pid == ftrace_swapper_pid)
2950 clear_ftrace_swapper();
2951 else
2952 clear_ftrace_pid(*pid);
2953
2954 *pid = NULL;
2955}
2956
2957static void set_ftrace_pid_task(struct pid *pid)
2958{
2959 if (pid == ftrace_swapper_pid)
2960 set_ftrace_swapper();
2961 else
2962 set_ftrace_pid(pid);
2963}
2964
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002965static ssize_t
2966ftrace_pid_write(struct file *filp, const char __user *ubuf,
2967 size_t cnt, loff_t *ppos)
2968{
Steven Rostedt978f3a42008-12-04 00:26:40 -05002969 struct pid *pid;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002970 char buf[64];
2971 long val;
2972 int ret;
2973
2974 if (cnt >= sizeof(buf))
2975 return -EINVAL;
2976
2977 if (copy_from_user(&buf, ubuf, cnt))
2978 return -EFAULT;
2979
2980 buf[cnt] = 0;
2981
2982 ret = strict_strtol(buf, 10, &val);
2983 if (ret < 0)
2984 return ret;
2985
Steven Rostedte6ea44e2009-02-14 01:42:44 -05002986 mutex_lock(&ftrace_lock);
Steven Rostedt978f3a42008-12-04 00:26:40 -05002987 if (val < 0) {
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002988 /* disable pid tracing */
Steven Rostedt978f3a42008-12-04 00:26:40 -05002989 if (!ftrace_pid_trace)
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002990 goto out;
Steven Rostedt978f3a42008-12-04 00:26:40 -05002991
2992 clear_ftrace_pid_task(&ftrace_pid_trace);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002993
2994 } else {
Steven Rostedte32d8952008-12-04 00:26:41 -05002995 /* swapper task is special */
2996 if (!val) {
2997 pid = ftrace_swapper_pid;
2998 if (pid == ftrace_pid_trace)
2999 goto out;
3000 } else {
3001 pid = find_get_pid(val);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003002
Steven Rostedte32d8952008-12-04 00:26:41 -05003003 if (pid == ftrace_pid_trace) {
3004 put_pid(pid);
3005 goto out;
3006 }
Steven Rostedt978f3a42008-12-04 00:26:40 -05003007 }
3008
3009 if (ftrace_pid_trace)
3010 clear_ftrace_pid_task(&ftrace_pid_trace);
3011
3012 if (!pid)
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003013 goto out;
3014
Steven Rostedt978f3a42008-12-04 00:26:40 -05003015 ftrace_pid_trace = pid;
Steven Rostedt0ef8cde2008-12-03 15:36:58 -05003016
Steven Rostedt978f3a42008-12-04 00:26:40 -05003017 set_ftrace_pid_task(ftrace_pid_trace);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003018 }
3019
3020 /* update the function call */
3021 ftrace_update_pid_func();
3022 ftrace_startup_enable(0);
3023
3024 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003025 mutex_unlock(&ftrace_lock);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003026
3027 return cnt;
3028}
3029
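/*
 * Illustrative usage sketch: this handler backs the "set_ftrace_pid"
 * debugfs file created in ftrace_init_debugfs() below.  Assuming
 * debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid    trace only pid 1234
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid    trace the idle (swapper) task
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid    disable pid filtering
 */
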
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003030static const struct file_operations ftrace_pid_fops = {
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003031 .read = ftrace_pid_read,
3032 .write = ftrace_pid_write,
3033};
3034
3035static __init int ftrace_init_debugfs(void)
3036{
3037 struct dentry *d_tracer;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003038
3039 d_tracer = tracing_init_dentry();
3040 if (!d_tracer)
3041 return 0;
3042
3043 ftrace_init_dyn_debugfs(d_tracer);
3044
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003045 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3046 NULL, &ftrace_pid_fops);
Steven Rostedt493762f2009-03-23 17:12:36 -04003047
3048 ftrace_profile_debugfs(d_tracer);
3049
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003050 return 0;
3051}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003052fs_initcall(ftrace_init_debugfs);
3053
Steven Rostedt3d083392008-05-12 21:20:42 +02003054/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04003055 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003056 *
3057 * This function should be used by panic code. It stops ftrace
3058 * but in a not so nice way: it permanently disables ftrace and
3059 * clears the trace function, without taking any locks.
3060 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04003061void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003062{
3063 ftrace_disabled = 1;
3064 ftrace_enabled = 0;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003065 clear_ftrace_function();
3066}
3067
3068/**
Steven Rostedt3d083392008-05-12 21:20:42 +02003069 * register_ftrace_function - register a function for profiling
3070 * @ops - ops structure that holds the function for profiling.
3071 *
3072 * Register a function to be called by all functions in the
3073 * kernel.
3074 *
3075 * Note: @ops->func and all the functions it calls must be labeled
3076 * with "notrace", otherwise it will go into a
3077 * recursive loop.
3078 */
3079int register_ftrace_function(struct ftrace_ops *ops)
3080{
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003081 int ret;
3082
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003083 if (unlikely(ftrace_disabled))
3084 return -1;
3085
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003086 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003087
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003088 ret = __register_ftrace_function(ops);
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05003089 ftrace_startup(0);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003090
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003091 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003092 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02003093}
3094
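/*
 * Illustrative sketch of a caller: the callback and ops names
 * (my_callback, my_ops) are hypothetical.  The callback receives the
 * address of the traced function (ip) and of its caller (parent_ip),
 * and must itself be notrace:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * and later unregister_ftrace_function(&my_ops) removes the hook.
 */
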
3095/**
Uwe Kleine-Koenig32632922009-01-12 23:35:50 +01003096 * unregister_ftrace_function - unregister a function for profiling.
Steven Rostedt3d083392008-05-12 21:20:42 +02003097 * @ops - ops structure that holds the function to unregister
3098 *
3099 * Unregister a function that was added to be called by ftrace profiling.
3100 */
3101int unregister_ftrace_function(struct ftrace_ops *ops)
3102{
3103 int ret;
3104
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003105 mutex_lock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02003106 ret = __unregister_ftrace_function(ops);
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05003107 ftrace_shutdown(0);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003108 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003109
3110 return ret;
3111}
3112
Ingo Molnare309b412008-05-12 21:20:51 +02003113int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003114ftrace_enable_sysctl(struct ctl_table *table, int write,
Steven Rostedt5072c592008-05-12 21:20:43 +02003115 struct file *file, void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003116 loff_t *ppos)
3117{
3118 int ret;
3119
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003120 if (unlikely(ftrace_disabled))
3121 return -ENODEV;
3122
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003123 mutex_lock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003124
Steven Rostedt5072c592008-05-12 21:20:43 +02003125 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003126
Li Zefana32c7762009-06-26 16:55:51 +08003127 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003128 goto out;
3129
Li Zefana32c7762009-06-26 16:55:51 +08003130 last_ftrace_enabled = !!ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003131
3132 if (ftrace_enabled) {
3133
3134 ftrace_startup_sysctl();
3135
3136 /* we are starting ftrace again */
3137 if (ftrace_list != &ftrace_list_end) {
3138 if (ftrace_list->next == &ftrace_list_end)
3139 ftrace_trace_function = ftrace_list->func;
3140 else
3141 ftrace_trace_function = ftrace_list_func;
3142 }
3143
3144 } else {
3145 /* stopping ftrace calls (just send to ftrace_stub) */
3146 ftrace_trace_function = ftrace_stub;
3147
3148 ftrace_shutdown_sysctl();
3149 }
3150
3151 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003152 mutex_unlock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02003153 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02003154}
Ingo Molnarf17845e2008-10-24 12:47:10 +02003155
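/*
 * Illustrative usage sketch: this handler serves the ftrace_enabled
 * sysctl (wired up in the core sysctl table), so function tracing can
 * be toggled from user space with, for example:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */
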
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003156#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003157
Steven Rostedt597af812009-04-03 15:24:12 -04003158static int ftrace_graph_active;
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08003159static struct notifier_block ftrace_suspend_notifier;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003160
Steven Rostedte49dc192008-12-02 23:50:05 -05003161int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3162{
3163 return 0;
3164}
3165
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01003166/* The callbacks that hook a function */
3167trace_func_graph_ret_t ftrace_graph_return =
3168 (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05003169trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003170
3171/* Try to assign a return stack to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
3172static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3173{
3174 int i;
3175 int ret = 0;
3176 unsigned long flags;
3177 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3178 struct task_struct *g, *t;
3179
3180 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3181 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3182 * sizeof(struct ftrace_ret_stack),
3183 GFP_KERNEL);
3184 if (!ret_stack_list[i]) {
3185 start = 0;
3186 end = i;
3187 ret = -ENOMEM;
3188 goto free;
3189 }
3190 }
3191
3192 read_lock_irqsave(&tasklist_lock, flags);
3193 do_each_thread(g, t) {
3194 if (start == end) {
3195 ret = -EAGAIN;
3196 goto unlock;
3197 }
3198
3199 if (t->ret_stack == NULL) {
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01003200 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003201 atomic_set(&t->trace_overrun, 0);
Steven Rostedt26c01622009-06-02 14:01:19 -04003202 t->curr_ret_stack = -1;
3203 /* Make sure the tasks see the -1 first: */
3204 smp_wmb();
3205 t->ret_stack = ret_stack_list[start++];
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003206 }
3207 } while_each_thread(g, t);
3208
3209unlock:
3210 read_unlock_irqrestore(&tasklist_lock, flags);
3211free:
3212 for (i = start; i < end; i++)
3213 kfree(ret_stack_list[i]);
3214 return ret;
3215}
3216
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003217static void
3218ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3219 struct task_struct *next)
3220{
3221 unsigned long long timestamp;
3222 int index;
3223
Steven Rostedtbe6f1642009-03-24 11:06:24 -04003224 /*
3225 * Does the user want to count the time a function was asleep?
3226 * If so, do not update the time stamps.
3227 */
3228 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3229 return;
3230
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003231 timestamp = trace_clock_local();
3232
3233 prev->ftrace_timestamp = timestamp;
3234
3235 /* only process tasks that we timestamped */
3236 if (!next->ftrace_timestamp)
3237 return;
3238
3239 /*
3240 * Update all the counters in next to make up for the
3241 * time next was sleeping.
3242 */
3243 timestamp -= next->ftrace_timestamp;
3244
3245 for (index = next->curr_ret_stack; index >= 0; index--)
3246 next->ret_stack[index].calltime += timestamp;
3247}
3248
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003249/* Allocate a return stack for each task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003250static int start_graph_tracing(void)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003251{
3252 struct ftrace_ret_stack **ret_stack_list;
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01003253 int ret, cpu;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003254
3255 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3256 sizeof(struct ftrace_ret_stack *),
3257 GFP_KERNEL);
3258
3259 if (!ret_stack_list)
3260 return -ENOMEM;
3261
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01003262 /* The cpu_boot init_task->ret_stack will never be freed */
Steven Rostedt179c4982009-06-02 12:03:19 -04003263 for_each_online_cpu(cpu) {
3264 if (!idle_task(cpu)->ret_stack)
3265 ftrace_graph_init_task(idle_task(cpu));
3266 }
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01003267
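	/*
	 * alloc_retstack_tasklist() hands out at most
	 * FTRACE_RETSTACK_ALLOC_SIZE stacks per call and returns -EAGAIN
	 * if that batch ran out before every thread was covered, so
	 * retry until all tasks have a return stack.
	 */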
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003268 do {
3269 ret = alloc_retstack_tasklist(ret_stack_list);
3270 } while (ret == -EAGAIN);
3271
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003272 if (!ret) {
3273 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3274 if (ret)
3275 pr_info("ftrace_graph: Couldn't activate tracepoint"
3276 " probe to kernel_sched_switch\n");
3277 }
3278
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003279 kfree(ret_stack_list);
3280 return ret;
3281}
3282
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08003283/*
3284 * Hibernation protection.
3285 * The state of the current task is too unstable during
3286 * suspend/restore to disk. We want to protect against that.
3287 */
3288static int
3289ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3290 void *unused)
3291{
3292 switch (state) {
3293 case PM_HIBERNATION_PREPARE:
3294 pause_graph_tracing();
3295 break;
3296
3297 case PM_POST_HIBERNATION:
3298 unpause_graph_tracing();
3299 break;
3300 }
3301 return NOTIFY_DONE;
3302}
3303
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01003304int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3305 trace_func_graph_ent_t entryfunc)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01003306{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003307 int ret = 0;
3308
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003309 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003310
Steven Rostedt05ce5812009-03-24 00:18:31 -04003311 /* we currently allow only one tracer registered at a time */
Steven Rostedt597af812009-04-03 15:24:12 -04003312 if (ftrace_graph_active) {
Steven Rostedt05ce5812009-03-24 00:18:31 -04003313 ret = -EBUSY;
3314 goto out;
3315 }
3316
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08003317 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3318 register_pm_notifier(&ftrace_suspend_notifier);
3319
Steven Rostedt597af812009-04-03 15:24:12 -04003320 ftrace_graph_active++;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003321 ret = start_graph_tracing();
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003322 if (ret) {
Steven Rostedt597af812009-04-03 15:24:12 -04003323 ftrace_graph_active--;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003324 goto out;
3325 }
Steven Rostedte53a6312008-11-26 00:16:25 -05003326
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01003327 ftrace_graph_return = retfunc;
3328 ftrace_graph_entry = entryfunc;
Steven Rostedte53a6312008-11-26 00:16:25 -05003329
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05003330 ftrace_startup(FTRACE_START_FUNC_RET);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003331
3332out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003333 mutex_unlock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003334 return ret;
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01003335}
3336
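/*
 * Illustrative sketch of a caller: the hook names (my_graph_entry,
 * my_graph_return) are hypothetical.  The entry hook returns non-zero
 * to ask for the matching function exit to be traced as well:
 *
 *	static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void notrace my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *
 * Only one such pair may be registered at a time (see the -EBUSY
 * check above); unregister_ftrace_graph() below removes it.
 */
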
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003337void unregister_ftrace_graph(void)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01003338{
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003339 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003340
Steven Rostedt597af812009-04-03 15:24:12 -04003341 if (unlikely(!ftrace_graph_active))
Steven Rostedt2aad1b72009-03-30 11:11:28 -04003342 goto out;
3343
Steven Rostedt597af812009-04-03 15:24:12 -04003344 ftrace_graph_active--;
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003345 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01003346 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05003347 ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05003348 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08003349 unregister_pm_notifier(&ftrace_suspend_notifier);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003350
Steven Rostedt2aad1b72009-03-30 11:11:28 -04003351 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003352 mutex_unlock(&ftrace_lock);
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01003353}
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003354
3355/* Allocate a return stack for a newly created task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003356void ftrace_graph_init_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003357{
Steven Rostedt84047e32009-06-02 16:51:55 -04003358 /* Make sure we do not use the parent ret_stack */
3359 t->ret_stack = NULL;
3360
Steven Rostedt597af812009-04-03 15:24:12 -04003361 if (ftrace_graph_active) {
Steven Rostedt82310a32009-06-02 12:26:07 -04003362 struct ftrace_ret_stack *ret_stack;
3363
3364 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003365 * sizeof(struct ftrace_ret_stack),
3366 GFP_KERNEL);
Steven Rostedt82310a32009-06-02 12:26:07 -04003367 if (!ret_stack)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003368 return;
3369 t->curr_ret_stack = -1;
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01003370 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003371 atomic_set(&t->trace_overrun, 0);
Steven Rostedt8aef2d22009-03-24 01:10:15 -04003372 t->ftrace_timestamp = 0;
Steven Rostedt82310a32009-06-02 12:26:07 -04003373 /* make curr_ret_stack visible before we add the ret_stack */
3374 smp_wmb();
3375 t->ret_stack = ret_stack;
Steven Rostedt84047e32009-06-02 16:51:55 -04003376 }
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003377}
3378
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003379void ftrace_graph_exit_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003380{
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01003381 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3382
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003383 t->ret_stack = NULL;
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01003384 /* NULL must become visible to IRQs before we free it: */
3385 barrier();
3386
3387 kfree(ret_stack);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003388}
Steven Rostedt14a866c2008-12-02 23:50:02 -05003389
3390void ftrace_graph_stop(void)
3391{
3392 ftrace_stop();
3393}
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01003394#endif
3395