/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

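/*
 * When more than one ftrace_ops is registered, ftrace_trace_function is
 * pointed at this walker: it iterates the singly linked ftrace_list and
 * calls every registered handler for the traced location.  With a single
 * handler the walk is skipped and the handler is installed directly
 * (see __register_ftrace_function() below).
 */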
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs see the change.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

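/*
 * New handlers are pushed onto the head of ftrace_list.  The smp_wmb()
 * below pairs with the read_barrier_depends() in ftrace_list_func() so a
 * concurrent list walker never sees the new entry before its ->next
 * pointer is valid.  A rough sketch of how a caller is expected to hook
 * in through the public register_ftrace_function() wrapper:
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip);
 *	static struct ftrace_ops my_ops = { .func = my_func };
 *
 *	register_ftrace_function(&my_ops);
 */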
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, done by the callers themselves at
 * run time rather than collected at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
static void ftrace_release_hash(unsigned long start, unsigned long end);
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
static inline void ftrace_release_hash(unsigned long start, unsigned long end)
{
}
#endif

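/*
 * Summary of the two cases above: without CONFIG_FTRACE_MCOUNT_RECORD the
 * mcount callers are discovered at run time by ftrace_record_ip(), which
 * can run on several CPUs at once, so the hash needs its own irq-safe
 * spinlock.  With CONFIG_FTRACE_MCOUNT_RECORD the call sites come from the
 * __mcount_loc section and the hash is only touched under ftrace_lock, so
 * the lock macros compile away.
 */
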
/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


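/*
 * Records whose mcount site carries a kprobe are "frozen": the patching
 * code leaves them alone, and frozen_record_count lets skip_trace() bail
 * out quickly when no frozen records exist at all.
 */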
#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

	ftrace_release_hash(s, e);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

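/*
 * FTRACE_ADDR is where an enabled call site jumps to: the architecture's
 * ftrace_caller trampoline.  ftrace_call_replace() builds the instruction
 * bytes for a call to a given address, ftrace_nop_replace() the bytes for
 * a NOP, and ftrace_modify_code() patches a site only when the bytes found
 * there match the expected "old" code.
 */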
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

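/*
 * Called via __ftrace_modify_code() under stop_machine(): walk every
 * dyn_ftrace record and patch its call site, either into a call to
 * ftrace_caller (enable, subject to the filter/notrace flags above) or
 * back into a NOP (disable).  Sites that currently carry a kprobe are
 * frozen and skipped.
 */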
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

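/*
 * Turn a freshly recorded call site back into a NOP.  The "old" code we
 * expect to find is the original call to mcount; when the patch fails,
 * the error code from ftrace_modify_code() says why (-EFAULT: the site
 * could not be accessed, -EINVAL: unexpected instruction bytes, -EPERM:
 * the write failed) and the record is marked FTRACE_FL_FAILED.
 */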
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

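/*
 * All code patching funnels through here: the caller packs one or more
 * FTRACE_* command bits into an int and ftrace_run_update_code() runs
 * this function via stop_machine(), so no other CPU can be executing a
 * call site while it is being rewritten.
 */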
static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

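/*
 * ftraced is the kernel thread (ftraced_task) that converts newly
 * recorded mcount call sites.  Setting ftraced_stop keeps it from calling
 * kstop_machine; ftrace_force_update() is still called so that anything
 * already recorded is handled once before the daemon goes quiet.
 */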
void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

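/*
 * One conversion pass, run with every other CPU stopped (stop_machine(),
 * or with interrupts off during early boot): walk each hash bucket, patch
 * every newly recorded mcount site to a NOP and mark it
 * FTRACE_FL_CONVERTED.  Records with a kprobe on them are parked on a
 * temporary list and re-queued so a later pass can retry them.
 */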
static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e., patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

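/*
 * Everything below implements the debugfs files created in
 * ftrace_init_debugfs(): "available_filter_functions", "failures",
 * "set_ftrace_filter", "set_ftrace_notrace" and "ftraced_enabled".
 * The seq_file iterator that follows walks the ftrace_pages records
 * for the listing files.
 */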
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

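/*
 * Match kernel symbol names against the pattern written to the filter
 * files.  A single '*' is supported as a wildcard: "sys_*" selects every
 * function whose name starts with "sys_", "*_lock" every function ending
 * in "_lock", and "*spin*" any function containing "spin".  Matching
 * records get the FILTER (or NOTRACE) flag set.
 */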
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

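/*
 * Kernel code can set filters directly through the two wrappers below;
 * a rough sketch, filtering on a single function:
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * The user-space equivalent is writing the same pattern into the
 * set_ftrace_filter debugfs file.
 */
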
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

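/*
 * The "ftraced_enabled" file above accepts "enable"/"disable" (or a
 * number treated as a boolean) and toggles the daemon through
 * ftrace_enable_daemon()/ftrace_disable_daemon().  Assuming debugfs is
 * mounted at /sys/kernel/debug, for example:
 *
 *	echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 *
 * stops the daemon from calling kstop_machine to patch newly recorded
 * call sites.
 */
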
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

Steven Rostedt68bf21a2008-08-14 15:45:08 -04001608#ifdef CONFIG_FTRACE_MCOUNT_RECORD
1609static int ftrace_convert_nops(unsigned long *start,
1610 unsigned long *end)
1611{
1612 unsigned long *p;
1613 unsigned long addr;
1614 unsigned long flags;
1615
1616 p = start;
1617 while (p < end) {
1618 addr = ftrace_call_adjust(*p++);
Steven Rostedt99ecdc42008-08-15 21:40:05 -04001619 /* should not be called from interrupt context */
Steven Rostedtfed19392008-08-14 22:47:19 -04001620 spin_lock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001621 ftrace_record_ip(addr);
Steven Rostedtfed19392008-08-14 22:47:19 -04001622 spin_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001623 ftrace_shutdown_replenish();
1624 }
1625
1626 /* the argument is ignored by __ftrace_update_code() */
1627 local_irq_save(flags);
1628 __ftrace_update_code(p);
1629 local_irq_restore(flags);
1630
1631 return 0;
1632}
1633
Steven Rostedt90d595f2008-08-14 15:45:09 -04001634void ftrace_init_module(unsigned long *start, unsigned long *end)
1635{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04001636 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04001637 return;
Steven Rostedt90d595f2008-08-14 15:45:09 -04001638 ftrace_convert_nops(start, end);
1639}
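
/*
 * Illustrative example (not part of the original file): the module loader is
 * expected to hand the bounds of a module's __mcount_loc section to
 * ftrace_init_module() while the module is being set up, roughly like this
 * (the variable names are assumptions):
 *
 *	unsigned long *mseg = start_of_modules_mcount_loc_section;
 *	unsigned long nents = number_of_entries_in_that_section;
 *
 *	ftrace_init_module(mseg, mseg + nents);
 *
 * Each entry is the address of one mcount call site, which is then recorded
 * and converted to a nop by ftrace_convert_nops().
 */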
1640
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001641extern unsigned long __start_mcount_loc[];
1642extern unsigned long __stop_mcount_loc[];
1643
1644void __init ftrace_init(void)
1645{
1646 unsigned long count, addr, flags;
1647 int ret;
1648
1649 /* Keep the ftrace pointer to the stub */
1650 addr = (unsigned long)ftrace_stub;
1651
1652 local_irq_save(flags);
1653 ftrace_dyn_arch_init(&addr);
1654 local_irq_restore(flags);
1655
1656 /* ftrace_dyn_arch_init places the return code in addr */
1657 if (addr)
1658 goto failed;
1659
1660 count = __stop_mcount_loc - __start_mcount_loc;
1661
1662 ret = ftrace_dyn_table_alloc(count);
1663 if (ret)
1664 goto failed;
1665
1666 last_ftrace_enabled = ftrace_enabled = 1;
1667
1668 ret = ftrace_convert_nops(__start_mcount_loc,
1669 __stop_mcount_loc);
1670
1671 return;
1672 failed:
1673 ftrace_disabled = 1;
1674}
1675#else /* CONFIG_FTRACE_MCOUNT_RECORD */
Steven Rostedtbd95b882008-10-16 09:31:27 -04001676
1677static void ftrace_release_hash(unsigned long start, unsigned long end)
1678{
1679 struct dyn_ftrace *rec;
1680 struct hlist_node *t, *n;
1681 struct hlist_head *head, temp_list;
1682 unsigned long flags;
1683 int i, cpu;
1684
1685 preempt_disable_notrace();
1686
1687 /* disable in case we call something that calls mcount */
1688 cpu = raw_smp_processor_id();
1689 per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
1690
1691 ftrace_hash_lock(flags);
1692
1693 for (i = 0; i < FTRACE_HASHSIZE; i++) {
1694 INIT_HLIST_HEAD(&temp_list);
1695 head = &ftrace_hash[i];
1696
1697 /* all CPUs are stopped, we are safe to modify code */
1698 hlist_for_each_entry_safe(rec, t, n, head, node) {
1699 if (rec->flags & FTRACE_FL_FREE)
1700 continue;
1701
1702 if ((rec->ip >= start) && (rec->ip < end))
1703 ftrace_free_rec(rec);
1704 }
1705 }
1706
1707 ftrace_hash_unlock(flags);
1708
1709 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
1710 preempt_enable_notrace();
1711
1712}
1713
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001714static int ftraced(void *ignore)
1715{
1716 unsigned long usecs;
1717
1718 while (!kthread_should_stop()) {
1719
1720 set_current_state(TASK_INTERRUPTIBLE);
1721
1722 /* check once a second */
1723 schedule_timeout(HZ);
1724
1725 if (unlikely(ftrace_disabled))
1726 continue;
1727
1728 mutex_lock(&ftrace_sysctl_lock);
1729 mutex_lock(&ftraced_lock);
1730 if (!ftraced_suspend && !ftraced_stop &&
1731 ftrace_update_code()) {
1732 usecs = nsecs_to_usecs(ftrace_update_time);
1733 if (ftrace_update_tot_cnt > 100000) {
1734 ftrace_update_tot_cnt = 0;
1735 pr_info("hm, dynamic ftrace overflow: %lu change%s"
1736 " (%lu total) in %lu usec%s\n",
1737 ftrace_update_cnt,
1738 ftrace_update_cnt != 1 ? "s" : "",
1739 ftrace_update_tot_cnt,
1740 usecs, usecs != 1 ? "s" : "");
1741 ftrace_disabled = 1;
1742 WARN_ON_ONCE(1);
1743 }
1744 }
1745 mutex_unlock(&ftraced_lock);
1746 mutex_unlock(&ftrace_sysctl_lock);
1747
1748 ftrace_shutdown_replenish();
1749 }
1750 __set_current_state(TASK_RUNNING);
1751 return 0;
1752}
1753
Ingo Molnare309b412008-05-12 21:20:51 +02001754static int __init ftrace_dynamic_init(void)
Steven Rostedt3d083392008-05-12 21:20:42 +02001755{
1756 struct task_struct *p;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001757 unsigned long addr;
Steven Rostedt3d083392008-05-12 21:20:42 +02001758 int ret;
1759
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001760 addr = (unsigned long)ftrace_record_ip;
Ingo Molnar9ff9cdb2008-05-12 21:20:50 +02001761
Rusty Russell784e2d72008-07-28 12:16:31 -05001762 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001763
1764 /* ftrace_dyn_arch_init places the return code in addr */
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001765 if (addr) {
1766 ret = (int)addr;
1767 goto failed;
1768 }
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001769
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001770 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
Steven Rostedt3d083392008-05-12 21:20:42 +02001771 if (ret)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001772 goto failed;
Steven Rostedt3d083392008-05-12 21:20:42 +02001773
1774 p = kthread_run(ftraced, NULL, "ftraced");
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001775 if (IS_ERR(p)) {
1776 ret = -1;
1777 goto failed;
1778 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001779
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001780 last_ftrace_enabled = ftrace_enabled = 1;
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001781 ftraced_task = p;
Steven Rostedt3d083392008-05-12 21:20:42 +02001782
1783 return 0;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001784
1785 failed:
1786 ftrace_disabled = 1;
1787 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001788}
1789
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001790core_initcall(ftrace_dynamic_init);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001791#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1792
Steven Rostedt3d083392008-05-12 21:20:42 +02001793#else
Ingo Molnarc7aafc52008-05-12 21:20:45 +02001794# define ftrace_startup() do { } while (0)
1795# define ftrace_shutdown() do { } while (0)
1796# define ftrace_startup_sysctl() do { } while (0)
1797# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001798# define ftrace_force_shutdown() do { } while (0)
Steven Rostedt3d083392008-05-12 21:20:42 +02001799#endif /* CONFIG_DYNAMIC_FTRACE */
1800
1801/**
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001802 * ftrace_kill_atomic - kill ftrace from critical sections
1803 *
1804 * This function should be used by panic code. It stops ftrace,
1805 * but not in a graceful way. If you simply need to kill ftrace
1806 * from a non-atomic context, use ftrace_kill() instead.
1807 */
1808void ftrace_kill_atomic(void)
1809{
1810 ftrace_disabled = 1;
1811 ftrace_enabled = 0;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001812#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001813 ftraced_suspend = -1;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001814#endif
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001815 clear_ftrace_function();
1816}
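
/*
 * Illustrative example (not part of the original file): a panic notifier is
 * one place ftrace_kill_atomic() fits, since notifiers on the panic chain run
 * in atomic context.  The notifier names below are assumptions.
 *
 *	static int my_panic_notify(struct notifier_block *nb,
 *				   unsigned long event, void *unused)
 *	{
 *		ftrace_kill_atomic();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_panic_nb = {
 *		.notifier_call = my_panic_notify,
 *	};
 *
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */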
1817
1818/**
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001819 * ftrace_kill - totally shut down ftrace
1820 *
1821 * This is a safety measure: if something that seems wrong is
1822 * detected, calling this function keeps ftrace from making any
1823 * further code modifications or updates. It is used when
1824 * something has gone wrong.
1825 */
1826void ftrace_kill(void)
1827{
1828 mutex_lock(&ftrace_sysctl_lock);
1829 ftrace_disabled = 1;
1830 ftrace_enabled = 0;
1831
1832 clear_ftrace_function();
1833 mutex_unlock(&ftrace_sysctl_lock);
1834
1835 /* Try to totally disable ftrace */
1836 ftrace_force_shutdown();
1837}
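
/*
 * Illustrative example (not part of the original file): a non-atomic error
 * path shutting ftrace down for good, e.g. after a failed self-test.  The
 * function name is an assumption.
 *
 *	static void my_selftest_failed(void)
 *	{
 *		pr_warning("ftrace self-test failed, disabling ftrace\n");
 *		ftrace_kill();
 *	}
 */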
1838
1839/**
Steven Rostedt3d083392008-05-12 21:20:42 +02001840 * register_ftrace_function - register a function for profiling
1841 * @ops: ops structure that holds the function for profiling.
1842 *
1843 * Register a function to be called from every function in the
1844 * kernel.
1845 *
1846 * Note: @ops->func and every function it calls must be labeled
1847 * with "notrace", otherwise the callback will recurse through
1848 * the mcount hook in an endless loop.
1849 */
1850int register_ftrace_function(struct ftrace_ops *ops)
1851{
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001852 int ret;
1853
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001854 if (unlikely(ftrace_disabled))
1855 return -ENODEV;
1856
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001857 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001858 ret = __register_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001859 ftrace_startup();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001860 mutex_unlock(&ftrace_sysctl_lock);
1861
1862 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001863}
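
/*
 * Illustrative example (not part of the original file): registering a
 * callback.  Once registered, every traced function calls back with its own
 * address (ip) and its caller's address (parent_ip).  The names used below
 * (my_call_count, my_trace_func, my_ftrace_ops) are assumptions.
 *
 *	static atomic_t my_call_count = ATOMIC_INIT(0);
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_call_count);
 *	}
 *
 *	static struct ftrace_ops my_ftrace_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ftrace_ops);
 *
 * As the note above says, the callback and anything it calls must be
 * notrace, or the hook would recurse into itself.
 */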
1864
1865/**
1866 * unregister_ftrace_function - unregister a function for profiling.
1867 * @ops: ops structure that holds the function to unregister
1868 *
1869 * Unregister a function that was added to be called by ftrace profiling.
1870 */
1871int unregister_ftrace_function(struct ftrace_ops *ops)
1872{
1873 int ret;
1874
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001875 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001876 ret = __unregister_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001877 ftrace_shutdown();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001878 mutex_unlock(&ftrace_sysctl_lock);
1879
1880 return ret;
1881}
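
/*
 * Illustrative example (not part of the original file): tearing down the
 * hypothetical my_ftrace_ops registered in the example above, e.g. from a
 * module exit path.
 *
 *	static void __exit my_tracer_exit(void)
 *	{
 *		unregister_ftrace_function(&my_ftrace_ops);
 *	}
 */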
1882
Ingo Molnare309b412008-05-12 21:20:51 +02001883int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001884ftrace_enable_sysctl(struct ctl_table *table, int write,
Steven Rostedt5072c592008-05-12 21:20:43 +02001885 struct file *file, void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001886 loff_t *ppos)
1887{
1888 int ret;
1889
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001890 if (unlikely(ftrace_disabled))
1891 return -ENODEV;
1892
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001893 mutex_lock(&ftrace_sysctl_lock);
1894
Steven Rostedt5072c592008-05-12 21:20:43 +02001895 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001896
1897 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1898 goto out;
1899
1900 last_ftrace_enabled = ftrace_enabled;
1901
1902 if (ftrace_enabled) {
1903
1904 ftrace_startup_sysctl();
1905
1906 /* we are starting ftrace again */
1907 if (ftrace_list != &ftrace_list_end) {
1908 if (ftrace_list->next == &ftrace_list_end)
1909 ftrace_trace_function = ftrace_list->func;
1910 else
1911 ftrace_trace_function = ftrace_list_func;
1912 }
1913
1914 } else {
1915 /* stopping ftrace calls (just send to ftrace_stub) */
1916 ftrace_trace_function = ftrace_stub;
1917
1918 ftrace_shutdown_sysctl();
1919 }
1920
1921 out:
1922 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001923 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001924}
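
/*
 * Illustrative example (not part of the original file): roughly how the
 * handler above is exposed as /proc/sys/kernel/ftrace_enabled from the
 * kernel's sysctl table.  The exact table entry shown here is an assumption.
 *
 *	{
 *		.ctl_name	= CTL_UNNUMBERED,
 *		.procname	= "ftrace_enabled",
 *		.data		= &ftrace_enabled,
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= &ftrace_enable_sysctl,
 *	},
 *
 * Writing 0 or 1 to that file then lands in ftrace_enable_sysctl() via
 * proc_dointvec(), which switches ftrace_trace_function as shown above.
 */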