/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

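/*
 * Note: ftrace_list_func() is only installed as ftrace_trace_function when
 * more than one ftrace_ops is registered (see __register_ftrace_function()
 * below).  Readers walk ftrace_list without taking ftrace_lock, which is
 * why the barriers above pair with the smp_wmb() done when an ops is added.
 */
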
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all CPUs stop calling
 * the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

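/*
 * Descriptive note: the FTRACE_* bits above are OR'd into a command word
 * and handed to ftrace_run_update_code().  For example, ftrace_startup()
 * below issues FTRACE_ENABLE_CALLS (plus FTRACE_UPDATE_TRACE_FUNC when the
 * trace function changed), and the whole update runs under stop_machine().
 */
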
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

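/*
 * Rough sizing (illustrative only; the exact numbers depend on the
 * architecture and struct sizes): with 4K pages and a dyn_ftrace record
 * of roughly 16-32 bytes, ENTRIES_PER_PAGE works out to about 125-250
 * records, so the NR_TO_INIT estimate of 10000 call sites costs on the
 * order of 40-80 pages up front in ftrace_dyn_table_alloc() below.
 */
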
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

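/*
 * Descriptive summary of the decision made below, keyed on the record's
 * NOTRACE/FILTER flags and whether filtering is active:
 *
 *   NOTRACE set          -> never trace; clear ENABLED if it was set
 *   filtering on, FILTER -> should trace; set ENABLED if not yet set
 *   filtering on, !FILTER-> should not trace; clear ENABLED if set
 *   otherwise            -> follow 'enable' directly (set/clear ENABLED)
 *
 * A record that ends up ENABLED has its mcount site patched to call
 * ftrace_caller(); otherwise the site is patched back to a NOP.
 */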
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disabling, or no filtering */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else {
					ftrace_bug(failed, rec->ip);
					/* Stop processing */
					return;
				}
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the code modification is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the code modification is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

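/*
 * ftrace_start_up acts as a reference count: ftrace_startup() and
 * ftrace_shutdown() below pair up, and the call sites are only patched
 * back to NOPs (FTRACE_DISABLE_CALLS) once the count drops to zero.
 */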
static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

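/*
 * Pattern forms accepted by ftrace_match() below (descriptive):
 *
 *   "func"   - MATCH_FULL: exact symbol name
 *   "func*"  - MATCH_FRONT_ONLY: symbols starting with "func"
 *   "*func"  - MATCH_END_ONLY: symbols ending with "func"
 *   "*func*" - MATCH_MIDDLE_ONLY: symbols containing "func"
 *
 * A leading '!' inverts the operation: matching records have the flag
 * cleared instead of set.
 */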
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

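/*
 * Illustrative use only (the pattern is a made-up example): a tracer that
 * only cares about the scheduler could do
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *
 * which resets any existing filter and then marks every symbol matching
 * "sched_*" with FTRACE_FL_FILTER via ftrace_match() above.
 */
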
Steven Rostedt41c52c02008-05-22 11:46:33 -04001278/**
1279 * ftrace_set_notrace - set a function to not trace in ftrace
1280 * @buf - the string that holds the function notrace text.
1281 * @len - the length of the string.
1282 * @reset - non zero to reset all filters before applying this filter.
1283 *
1284 * Notrace Filters denote which functions should not be enabled when tracing
1285 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1286 * for tracing.
1287 */
1288void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1289{
1290 ftrace_set_regex(buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02001291}
1292
Ingo Molnare309b412008-05-12 21:20:51 +02001293static int
Steven Rostedt41c52c02008-05-22 11:46:33 -04001294ftrace_regex_release(struct inode *inode, struct file *file, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02001295{
1296 struct seq_file *m = (struct seq_file *)file->private_data;
1297 struct ftrace_iterator *iter;
1298
Steven Rostedt41c52c02008-05-22 11:46:33 -04001299 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001300 if (file->f_mode & FMODE_READ) {
1301 iter = m->private;
1302
1303 seq_release(inode, file);
1304 } else
1305 iter = file->private_data;
1306
1307 if (iter->buffer_idx) {
1308 iter->filtered++;
1309 iter->buffer[iter->buffer_idx] = 0;
Steven Rostedt41c52c02008-05-22 11:46:33 -04001310 ftrace_match(iter->buffer, iter->buffer_idx, enable);
Steven Rostedt5072c592008-05-12 21:20:43 +02001311 }
1312
1313 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedtcb7be3b2008-10-23 09:33:05 -04001314 mutex_lock(&ftrace_start_lock);
Steven Rostedtee02a2e2008-11-15 16:31:41 -05001315 if (ftrace_start_up && ftrace_enabled)
Steven Rostedt5072c592008-05-12 21:20:43 +02001316 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
Steven Rostedtcb7be3b2008-10-23 09:33:05 -04001317 mutex_unlock(&ftrace_start_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001318 mutex_unlock(&ftrace_sysctl_lock);
1319
1320 kfree(iter);
Steven Rostedt41c52c02008-05-22 11:46:33 -04001321 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001322 return 0;
1323}
1324
Steven Rostedt41c52c02008-05-22 11:46:33 -04001325static int
1326ftrace_filter_release(struct inode *inode, struct file *file)
1327{
1328 return ftrace_regex_release(inode, file, 1);
1329}
1330
1331static int
1332ftrace_notrace_release(struct inode *inode, struct file *file)
1333{
1334 return ftrace_regex_release(inode, file, 0);
1335}
1336
Steven Rostedt5072c592008-05-12 21:20:43 +02001337static struct file_operations ftrace_avail_fops = {
1338 .open = ftrace_avail_open,
1339 .read = seq_read,
1340 .llseek = seq_lseek,
1341 .release = ftrace_avail_release,
1342};
1343
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301344static struct file_operations ftrace_failures_fops = {
1345 .open = ftrace_failures_open,
1346 .read = seq_read,
1347 .llseek = seq_lseek,
1348 .release = ftrace_avail_release,
1349};
1350
Steven Rostedt5072c592008-05-12 21:20:43 +02001351static struct file_operations ftrace_filter_fops = {
1352 .open = ftrace_filter_open,
Steven Rostedt41c52c02008-05-22 11:46:33 -04001353 .read = ftrace_regex_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02001354 .write = ftrace_filter_write,
Steven Rostedt41c52c02008-05-22 11:46:33 -04001355 .llseek = ftrace_regex_lseek,
Steven Rostedt5072c592008-05-12 21:20:43 +02001356 .release = ftrace_filter_release,
1357};
1358
Steven Rostedt41c52c02008-05-22 11:46:33 -04001359static struct file_operations ftrace_notrace_fops = {
1360 .open = ftrace_notrace_open,
1361 .read = ftrace_regex_read,
1362 .write = ftrace_notrace_write,
1363 .llseek = ftrace_regex_lseek,
1364 .release = ftrace_notrace_release,
1365};
1366
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05001367#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1368
1369static DEFINE_MUTEX(graph_lock);
1370
1371int ftrace_graph_count;
1372unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1373
1374static void *
1375g_next(struct seq_file *m, void *v, loff_t *pos)
1376{
1377 unsigned long *array = m->private;
1378 int index = *pos;
1379
1380 (*pos)++;
1381
1382 if (index >= ftrace_graph_count)
1383 return NULL;
1384
1385 return &array[index];
1386}
1387
1388static void *g_start(struct seq_file *m, loff_t *pos)
1389{
1390 void *p = NULL;
1391
1392 mutex_lock(&graph_lock);
1393
1394 p = g_next(m, p, pos);
1395
1396 return p;
1397}
1398
1399static void g_stop(struct seq_file *m, void *p)
1400{
1401 mutex_unlock(&graph_lock);
1402}
1403
1404static int g_show(struct seq_file *m, void *v)
1405{
1406 unsigned long *ptr = v;
1407 char str[KSYM_SYMBOL_LEN];
1408
1409 if (!ptr)
1410 return 0;
1411
1412 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1413
1414 seq_printf(m, "%s\n", str);
1415
1416 return 0;
1417}
1418
1419static struct seq_operations ftrace_graph_seq_ops = {
1420 .start = g_start,
1421 .next = g_next,
1422 .stop = g_stop,
1423 .show = g_show,
1424};
1425
1426static int
1427ftrace_graph_open(struct inode *inode, struct file *file)
1428{
1429 int ret = 0;
1430
1431 if (unlikely(ftrace_disabled))
1432 return -ENODEV;
1433
1434 mutex_lock(&graph_lock);
1435 if ((file->f_mode & FMODE_WRITE) &&
1436 !(file->f_flags & O_APPEND)) {
1437 ftrace_graph_count = 0;
1438 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1439 }
1440
1441 if (file->f_mode & FMODE_READ) {
1442 ret = seq_open(file, &ftrace_graph_seq_ops);
1443 if (!ret) {
1444 struct seq_file *m = file->private_data;
1445 m->private = ftrace_graph_funcs;
1446 }
1447 } else
1448 file->private_data = ftrace_graph_funcs;
1449 mutex_unlock(&graph_lock);
1450
1451 return ret;
1452}
1453
1454static ssize_t
1455ftrace_graph_read(struct file *file, char __user *ubuf,
1456 size_t cnt, loff_t *ppos)
1457{
1458 if (file->f_mode & FMODE_READ)
1459 return seq_read(file, ubuf, cnt, ppos);
1460 else
1461 return -EPERM;
1462}
1463
1464static int
1465ftrace_set_func(unsigned long *array, int idx, char *buffer)
1466{
1467 char str[KSYM_SYMBOL_LEN];
1468 struct dyn_ftrace *rec;
1469 struct ftrace_page *pg;
1470 int found = 0;
Liming Wangfaec2ec2008-12-04 14:24:49 +08001471 int i, j;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05001472
1473 if (ftrace_disabled)
1474 return -ENODEV;
1475
1476 /* should not be called from interrupt context */
1477 spin_lock(&ftrace_lock);
1478
1479 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1480 for (i = 0; i < pg->index; i++) {
1481 rec = &pg->records[i];
1482
1483 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1484 continue;
1485
1486 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1487 if (strcmp(str, buffer) == 0) {
1488 found = 1;
Liming Wangfaec2ec2008-12-04 14:24:49 +08001489 for (j = 0; j < idx; j++)
1490 if (array[j] == rec->ip) {
1491 found = 0;
1492 break;
1493 }
1494 if (found)
1495 array[idx] = rec->ip;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05001496 break;
1497 }
1498 }
1499 }
1500 spin_unlock(&ftrace_lock);
1501
1502 return found ? 0 : -EINVAL;
1503}
1504
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one function name at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
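/* Create the dynamic ftrace control files in the tracing debugfs directory. */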
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
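/*
 * Record every mcount call site listed between @start and @end, then
 * let ftrace_update_code() convert the recorded calls into nops.
 */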
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}
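/* Convert the mcount call sites of a module that is being loaded. */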
void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
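/*
 * Boot-time initialization: allocate the dyn_ftrace tables and convert
 * every compiled-in mcount call site recorded in the __mcount_loc section.
 */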
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else
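/* Without dynamic ftrace there is nothing to patch: just enable tracing. */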
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
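/* Report the pid currently being traced, "swapper tasks", or "no pid". */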
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
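/* Set or clear the trace flag on the idle task of every online CPU. */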
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}
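/* Set or clear the trace flag on every task attached to the given pid. */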
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
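/*
 * Parse the pid written to set_ftrace_pid: a negative value disables
 * pid filtering, 0 selects the per-cpu idle (swapper) tasks, and any
 * other value selects that pid.
 */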
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
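/* Create the ftrace debugfs files once the tracing directory exists. */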
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: everything is disabled immediately,
 * without taking any locks, which is what makes it safe to call
 * from atomic and panic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
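/*
 * Handler for the ftrace_enabled sysctl: when the value changes, switch
 * the traced function between the registered list and ftrace_stub.
 */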
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
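/*
 * Hook up the graph entry/return callbacks, allocate return stacks for
 * all existing tasks, and start the function graph tracer.
 */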
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
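/* Tear down the graph tracer and restore the stub callbacks. */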
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
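/* Used by the graph tracer to quickly stop function tracing (e.g. on an internal error). */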
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif