/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
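/*
 * Usage sketch (the debugfs mount point below is the conventional
 * default; adjust the path if debugfs is mounted elsewhere):
 *
 *	echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	... run a workload ...
 *	cat /sys/kernel/debug/tracing/stack_trace
 *	cat /sys/kernel/debug/tracing/stack_max_size
 */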
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"
#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
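/*
 * Compute how much stack the current call chain is using and, if it
 * beats the recorded maximum, save the stack trace and work out the
 * size of each frame by scanning the stack for the return addresses
 * that save_stack_trace() reported.
 */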
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
	/*
	 * Loop through all the entries. Some entries may for some
	 * reason not be found on the stack, so we have to account
	 * for them. If they are all there, this loop will only run
	 * once. This code only runs on a new max, so it is far from
	 * a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
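/*
 * The ftrace callback, invoked on function entry. The per-CPU
 * trace_active counter keeps check_stack() from recursing when the
 * functions it calls are themselves traced.
 */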
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	unsigned long stack;
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};
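/*
 * debugfs handlers for "stack_max_size": reading reports the largest
 * stack usage seen so far; writing a value (typically 0) resets it.
 */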
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);
	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we would deadlock on max_stack_lock, so we also need to
	 * increase the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
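/*
 * seq_file iterator for the "stack_trace" file. Position 0 produces
 * the header; position n+1 maps to stack_dump_trace[n]. The lock is
 * held (with trace_active bumped to keep the tracer off it) from
 * t_start() until t_stop().
 */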
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open		= stack_trace_filter_open,
	.read		= seq_read,
	.write		= ftrace_filter_write,
	.llseek		= ftrace_regex_lseek,
	.release	= ftrace_regex_release,
};
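/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: registers
 * or unregisters the ftrace callback when the value actually changes.
 */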
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
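/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer; "stacktrace_filter=<functions>" also stashes a filter list
 * that stack_trace_init() hands to ftrace once it is ready.
 */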
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
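/*
 * Create the debugfs control files, apply any boot-time filter, and
 * register the callback if the tracer was enabled on the command line.
 */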
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);