/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a brief lag before all CPUs stop calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

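/*
 * Illustrative sketch, not part of the original file: a tracer normally
 * attaches through the public register_ftrace_function() and
 * unregister_ftrace_function() wrappers declared in <linux/ftrace.h>, which
 * end up in the __register/__unregister helpers above.  The callback and
 * ops names below are hypothetical.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// runs on every traced function entry; must be reentrant,
 *		// cheap, and must not itself be traced
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
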
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, done by the callers themselves at
 * run time rather than recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif

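/*
 * Illustrative sketch, not in the original file: the macros above take a
 * caller-supplied flags word so the call sites compile the same way whether
 * or not the spinlock exists, e.g.
 *
 *	unsigned long flags;
 *
 *	ftrace_hash_lock(flags);
 *	// ... look up and/or add a dyn_ftrace entry in ftrace_hash ...
 *	ftrace_hash_unlock(flags);
 */
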
static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

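/*
 * Illustrative note, not in the original file: these bits are OR'd into a
 * single command word and handed to ftrace_run_update_code(), which runs
 * __ftrace_modify_code() under stop_machine().  For example, tearing the
 * tracer down (see ftrace_force_shutdown() below) uses:
 *
 *	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *
 *	ftrace_run_update_code(command);
 */
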
static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

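/*
 * Illustrative arithmetic, not in the original file: assuming 4 KiB pages
 * and a 64-bit build where struct ftrace_page is 16 bytes and struct
 * dyn_ftrace (hlist_node + ip + flags) is 32 bytes, this works out to
 *
 *	(4096 - 16) / 32 = 127 records per page
 *
 * The exact figure varies with architecture and structure layout.
 */
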
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

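/*
 * Illustrative summary, not in the original file, of the filtering branch
 * above (ftrace_filtered && enable): the flag combinations reduce to the
 * following actions, derived from the checks in __ftrace_replace_code():
 *
 *	FILTER  NOTRACE  ENABLED  ->  action
 *	  1        0        1         nothing (already on)
 *	  1        0        0         enable the call site
 *	  1        1        0         nothing (notrace wins)
 *	  1        1        1         disable the call site
 *	  0        0        0         nothing
 *	  0        0        1         disable the call site
 *	  0        1        0         nothing
 *	  0        1        1         disable the call site
 */
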
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

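/*
 * Illustrative examples, not in the original file, of how a filter string
 * maps onto these match types in ftrace_match() below (symbol names are
 * only examples):
 *
 *	"sys_open"	MATCH_FULL		exact symbol name
 *	"sys_*"		MATCH_FRONT_ONLY	symbols starting with "sys_"
 *	"*_lock"	MATCH_END_ONLY		symbols ending with "_lock"
 *	"*spin*"	MATCH_MIDDLE_ONLY	symbols containing "spin"
 */
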
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

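/*
 * Illustrative usage, not in the original file: an in-kernel tracer could
 * restrict tracing to the scheduler and mute one noisy helper like this
 * (the symbol names are only examples):
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *	ftrace_set_notrace("update_curr", strlen("update_curr"), 0);
 *
 * Passing @reset as non zero clears any previously set patterns of the same
 * kind before the new one is applied.
 */
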
Ingo Molnare309b412008-05-12 21:20:51 +02001355static int
Steven Rostedt41c52c02008-05-22 11:46:33 -04001356ftrace_regex_release(struct inode *inode, struct file *file, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02001357{
1358 struct seq_file *m = (struct seq_file *)file->private_data;
1359 struct ftrace_iterator *iter;
1360
Steven Rostedt41c52c02008-05-22 11:46:33 -04001361 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001362 if (file->f_mode & FMODE_READ) {
1363 iter = m->private;
1364
1365 seq_release(inode, file);
1366 } else
1367 iter = file->private_data;
1368
1369 if (iter->buffer_idx) {
1370 iter->filtered++;
1371 iter->buffer[iter->buffer_idx] = 0;
Steven Rostedt41c52c02008-05-22 11:46:33 -04001372 ftrace_match(iter->buffer, iter->buffer_idx, enable);
Steven Rostedt5072c592008-05-12 21:20:43 +02001373 }
1374
1375 mutex_lock(&ftrace_sysctl_lock);
1376 mutex_lock(&ftraced_lock);
1377 if (iter->filtered && ftraced_suspend && ftrace_enabled)
1378 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1379 mutex_unlock(&ftraced_lock);
1380 mutex_unlock(&ftrace_sysctl_lock);
1381
1382 kfree(iter);
Steven Rostedt41c52c02008-05-22 11:46:33 -04001383 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02001384 return 0;
1385}
1386
Steven Rostedt41c52c02008-05-22 11:46:33 -04001387static int
1388ftrace_filter_release(struct inode *inode, struct file *file)
1389{
1390 return ftrace_regex_release(inode, file, 1);
1391}
1392
1393static int
1394ftrace_notrace_release(struct inode *inode, struct file *file)
1395{
1396 return ftrace_regex_release(inode, file, 0);
1397}
1398
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001399static ssize_t
1400ftraced_read(struct file *filp, char __user *ubuf,
1401 size_t cnt, loff_t *ppos)
1402{
1403 /* don't worry about races */
1404 char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1405 int r = strlen(buf);
1406
1407 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1408}
1409
1410static ssize_t
1411ftraced_write(struct file *filp, const char __user *ubuf,
1412 size_t cnt, loff_t *ppos)
1413{
1414 char buf[64];
1415 long val;
1416 int ret;
1417
1418 if (cnt >= sizeof(buf))
1419 return -EINVAL;
1420
1421 if (copy_from_user(&buf, ubuf, cnt))
1422 return -EFAULT;
1423
1424 if (strncmp(buf, "enable", 6) == 0)
1425 val = 1;
1426 else if (strncmp(buf, "disable", 7) == 0)
1427 val = 0;
1428 else {
1429 buf[cnt] = 0;
1430
1431 ret = strict_strtoul(buf, 10, &val);
1432 if (ret < 0)
1433 return ret;
1434
1435 val = !!val;
1436 }
1437
1438 if (val)
1439 ftrace_enable_daemon();
1440 else
1441 ftrace_disable_daemon();
1442
1443 filp->f_pos += cnt;
1444
1445 return cnt;
1446}
1447
Steven Rostedt5072c592008-05-12 21:20:43 +02001448static struct file_operations ftrace_avail_fops = {
1449 .open = ftrace_avail_open,
1450 .read = seq_read,
1451 .llseek = seq_lseek,
1452 .release = ftrace_avail_release,
1453};
1454
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301455static struct file_operations ftrace_failures_fops = {
1456 .open = ftrace_failures_open,
1457 .read = seq_read,
1458 .llseek = seq_lseek,
1459 .release = ftrace_avail_release,
1460};
1461
Steven Rostedt5072c592008-05-12 21:20:43 +02001462static struct file_operations ftrace_filter_fops = {
1463 .open = ftrace_filter_open,
Steven Rostedt41c52c02008-05-22 11:46:33 -04001464 .read = ftrace_regex_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02001465 .write = ftrace_filter_write,
Steven Rostedt41c52c02008-05-22 11:46:33 -04001466 .llseek = ftrace_regex_lseek,
Steven Rostedt5072c592008-05-12 21:20:43 +02001467 .release = ftrace_filter_release,
1468};
1469
Steven Rostedt41c52c02008-05-22 11:46:33 -04001470static struct file_operations ftrace_notrace_fops = {
1471 .open = ftrace_notrace_open,
1472 .read = ftrace_regex_read,
1473 .write = ftrace_notrace_write,
1474 .llseek = ftrace_regex_lseek,
1475 .release = ftrace_notrace_release,
1476};
1477
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001478static struct file_operations ftraced_fops = {
1479 .open = tracing_open_generic,
1480 .read = ftraced_read,
1481 .write = ftraced_write,
1482};
1483
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001484/**
1485 * ftrace_force_update - force an update to all recording ftrace functions
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001486 */
1487int ftrace_force_update(void)
1488{
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001489 int ret = 0;
1490
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001491 if (unlikely(ftrace_disabled))
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001492 return -ENODEV;
1493
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001494 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001495 mutex_lock(&ftraced_lock);
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001496
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001497 /*
1498 * If ftraced_trigger is not set, then there is nothing
1499 * to update.
1500 */
1501 if (ftraced_trigger && !ftrace_update_code())
1502 ret = -EBUSY;
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001503
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001504 mutex_unlock(&ftraced_lock);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001505 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001506
1507 return ret;
1508}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
/*
 * The ftraced kernel thread wakes up once a second and, unless updates
 * are suspended or stopped, calls ftrace_update_code() to convert any
 * newly recorded mcount call sites.
 */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}
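
/*
 * Illustrative sketch, not part of the original file: the comment above
 * says ftrace_kill_atomic() is meant for panic code, so a hypothetical
 * user could wire it into the panic notifier chain roughly like this
 * (assumes <linux/notifier.h>).  The names example_panic_event,
 * example_panic_block and example_panic_setup are assumptions for the
 * example only; compiled out with #if 0.
 */
#if 0
static int example_panic_event(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	/* stop ftrace without taking locks; we may be in atomic context */
	ftrace_kill_atomic();
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_block = {
	.notifier_call = example_panic_event,
};

static int __init example_panic_setup(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_block);
	return 0;
}
#endif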

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If an anomaly is detected, calling this
 * function keeps ftrace from making any further modifications or
 * updates. It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
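
/*
 * Illustrative sketch, not part of the original file: a minimal user of
 * register_ftrace_function().  Per the note above, the callback must be
 * marked "notrace" so it is not traced itself.  The names
 * example_trace_func, example_ops and example_tracer_init are assumptions
 * made for this sketch; compiled out with #if 0.
 */
#if 0
static void notrace example_trace_func(unsigned long ip,
				       unsigned long parent_ip)
{
	/* called with the traced function's address and its call site */
}

static struct ftrace_ops example_ops __read_mostly =
{
	.func = example_trace_func,
};

static int __init example_tracer_init(void)
{
	/* adds example_ops to ftrace_list and starts tracing */
	return register_ftrace_function(&example_ops);
}
#endif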

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
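
/*
 * Illustrative sketch, not part of the original file: the counterpart to
 * the registration example above, tearing the callback down again.  The
 * name example_tracer_exit is an assumption; compiled out with #if 0.
 */
#if 0
static void __exit example_tracer_exit(void)
{
	/* remove example_ops; ftrace_shutdown() stops tracing if unused */
	unregister_ftrace_function(&example_ops);
}
#endif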

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}