/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to ftrace_stub and in essence stops
 * tracing.  There may be a lag before every CPU stops calling the
 * previously installed function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}
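
/*
 * Illustrative sketch, not part of this file's code: a tracer normally
 * hooks in through the public register_ftrace_function() and
 * unregister_ftrace_function() wrappers (assumed to be provided later
 * in this file), which funnel into the __register_ftrace_function()
 * and __unregister_ftrace_function() helpers below.  A minimal caller
 * could look like:
 *
 *      static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *      {
 *              // called at every traced mcount site; must be reentrant
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */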

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, when the call sites register themselves
 * at run time instead of being recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
                        spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused with a reference read from the code while
 * we are parsing the objcopy output of the text section.  Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000
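
/*
 * Rough sizing sketch (the numbers are illustrative and configuration
 * dependent): with 4 KiB pages and a struct dyn_ftrace of roughly 32
 * bytes on a 64-bit build, ENTRIES_PER_PAGE works out to about 125
 * records per page, so covering the NR_TO_INIT estimate of 10000 call
 * sites takes on the order of 80 pages from ftrace_dyn_table_alloc()
 * below.
 */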

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)     ({ 0; })
# define unfreeze_record(rec)   ({ 0; })
# define record_frozen(rec)     ({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var, which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        ftrace_hash_lock(flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        ftrace_hash_unlock(flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {
                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, mcount_addr);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                switch (failed) {
                case 1:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on modifying ");
                        print_ip_sym(ip);
                        break;
                case 2:
                        WARN_ON_ONCE(1);
                        pr_info("ftrace failed to modify ");
                        print_ip_sym(ip);
                        print_ip_ins(" expected: ", call);
                        print_ip_ins(" actual: ", (unsigned char *)ip);
                        print_ip_ins(" replace: ", nop);
                        printk(KERN_CONT "\n");
                        break;
                }

                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

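/*
 * Flow sketch (illustrative, not additional functionality): the callers
 * below combine command bits and hand them to __ftrace_modify_code()
 * while every other CPU is stopped, e.g.
 *
 *      int command = FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *
 *      ftrace_run_update_code(command);
 *
 * which is what ftrace_startup() ends up doing the first time a tracer
 * registers while ftrace_enabled is set.
 */
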
void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /*
                         * Skip over failed records which have not been
                         * freed.
                         */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /*
                         * Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ).
                         */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        /*
                         * Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run.
                         */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                freeze_record(p);
                                continue;
                        } else {
                                unfreeze_record(p);
                        }

                        /* convert record (i.e., patch mcount-call with NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }

                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine(__ftrace_update_code, NULL, NULL);

        return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld hash entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}
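
/*
 * Illustrative note on the single-'*' wildcard handling above (derived
 * from the parser in ftrace_match(), a sketch rather than a grammar):
 *
 *      "sched_wakeup"  -> MATCH_FULL:        exact symbol name
 *      "sched_*"       -> MATCH_FRONT_ONLY:  symbols starting with "sched_"
 *      "*_lock"        -> MATCH_END_ONLY:    symbols ending with "_lock"
 *      "*spin*"        -> MATCH_MIDDLE_ONLY: symbols containing "spin"
 */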

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt || cnt < 0)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}
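
/*
 * Illustrative usage sketch (the caller below is hypothetical, not part
 * of this file): an in-kernel user could limit tracing to scheduler
 * functions while excluding the *_lock helpers before enabling tracing:
 *
 *      static unsigned char filter[] = "sched_*";
 *      static unsigned char notrace[] = "*_lock";
 *
 *      ftrace_set_filter(filter, sizeof(filter) - 1, 1);
 *      ftrace_set_notrace(notrace, sizeof(notrace) - 1, 1);
 *
 * The buffers must be writable: ftrace_match() truncates the pattern in
 * place at the '*' while parsing it.
 */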

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
             size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
              size_t cnt, loff_t *ppos)
{
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);
1593
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001594#ifdef CONFIG_FTRACE_MCOUNT_RECORD
1595static int ftrace_convert_nops(unsigned long *start,
1596 unsigned long *end)
1597{
1598 unsigned long *p;
1599 unsigned long addr;
1600 unsigned long flags;
1601
1602 p = start;
1603 while (p < end) {
1604 addr = ftrace_call_adjust(*p++);
Steven Rostedt99ecdc42008-08-15 21:40:05 -04001605 /* should not be called from interrupt context */
Steven Rostedtfed19392008-08-14 22:47:19 -04001606 spin_lock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001607 ftrace_record_ip(addr);
Steven Rostedtfed19392008-08-14 22:47:19 -04001608 spin_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001609 ftrace_shutdown_replenish();
1610 }
1611
1612	/* p is ignored by __ftrace_update_code(); it is passed only to match the prototype */
1613 local_irq_save(flags);
1614 __ftrace_update_code(p);
1615 local_irq_restore(flags);
1616
1617 return 0;
1618}
1619
Steven Rostedt90d595f2008-08-14 15:45:09 -04001620void ftrace_init_module(unsigned long *start, unsigned long *end)
1621{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04001622 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04001623 return;
Steven Rostedt90d595f2008-08-14 15:45:09 -04001624 ftrace_convert_nops(start, end);
1625}
1626
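/*
 * Illustrative sketch only (not from the original source): how a caller
 * such as the module loader might hand a module's __mcount_loc section
 * to ftrace_init_module(). The helper name and the byte-size parameter
 * are assumptions for the example; the real call site lives in the
 * module loading code.
 */
#if 0
static void example_register_module_callsites(unsigned long *mcount_start,
					      unsigned long bytes)
{
	unsigned long *mcount_end = mcount_start + bytes / sizeof(*mcount_start);

	/* records every listed mcount call site and patches it to a NOP */
	ftrace_init_module(mcount_start, mcount_end);
}
#endif
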
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001627extern unsigned long __start_mcount_loc[];
1628extern unsigned long __stop_mcount_loc[];
1629
1630void __init ftrace_init(void)
1631{
1632 unsigned long count, addr, flags;
1633 int ret;
1634
1635 /* Keep the ftrace pointer to the stub */
1636 addr = (unsigned long)ftrace_stub;
1637
1638 local_irq_save(flags);
1639 ftrace_dyn_arch_init(&addr);
1640 local_irq_restore(flags);
1641
1642 /* ftrace_dyn_arch_init places the return code in addr */
1643 if (addr)
1644 goto failed;
1645
1646 count = __stop_mcount_loc - __start_mcount_loc;
1647
1648 ret = ftrace_dyn_table_alloc(count);
1649 if (ret)
1650 goto failed;
1651
1652 last_ftrace_enabled = ftrace_enabled = 1;
1653
1654 ret = ftrace_convert_nops(__start_mcount_loc,
1655 __stop_mcount_loc);
1656
1657 return;
1658 failed:
1659 ftrace_disabled = 1;
1660}
1661#else /* CONFIG_FTRACE_MCOUNT_RECORD */
1662static int ftraced(void *ignore)
1663{
1664 unsigned long usecs;
1665
1666 while (!kthread_should_stop()) {
1667
1668 set_current_state(TASK_INTERRUPTIBLE);
1669
1670 /* check once a second */
1671 schedule_timeout(HZ);
1672
1673 if (unlikely(ftrace_disabled))
1674 continue;
1675
1676 mutex_lock(&ftrace_sysctl_lock);
1677 mutex_lock(&ftraced_lock);
1678 if (!ftraced_suspend && !ftraced_stop &&
1679 ftrace_update_code()) {
1680 usecs = nsecs_to_usecs(ftrace_update_time);
1681 if (ftrace_update_tot_cnt > 100000) {
1682 ftrace_update_tot_cnt = 0;
1683 pr_info("hm, dftrace overflow: %lu change%s"
1684 " (%lu total) in %lu usec%s\n",
1685 ftrace_update_cnt,
1686 ftrace_update_cnt != 1 ? "s" : "",
1687 ftrace_update_tot_cnt,
1688 usecs, usecs != 1 ? "s" : "");
1689 ftrace_disabled = 1;
1690 WARN_ON_ONCE(1);
1691 }
1692 }
1693 mutex_unlock(&ftraced_lock);
1694 mutex_unlock(&ftrace_sysctl_lock);
1695
1696 ftrace_shutdown_replenish();
1697 }
1698 __set_current_state(TASK_RUNNING);
1699 return 0;
1700}
1701
Ingo Molnare309b412008-05-12 21:20:51 +02001702static int __init ftrace_dynamic_init(void)
Steven Rostedt3d083392008-05-12 21:20:42 +02001703{
1704 struct task_struct *p;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001705 unsigned long addr;
Steven Rostedt3d083392008-05-12 21:20:42 +02001706 int ret;
1707
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001708 addr = (unsigned long)ftrace_record_ip;
Ingo Molnar9ff9cdb2008-05-12 21:20:50 +02001709
Rusty Russell784e2d72008-07-28 12:16:31 -05001710 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001711
1712 /* ftrace_dyn_arch_init places the return code in addr */
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001713 if (addr) {
1714 ret = (int)addr;
1715 goto failed;
1716 }
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001717
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001718 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
Steven Rostedt3d083392008-05-12 21:20:42 +02001719 if (ret)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001720 goto failed;
Steven Rostedt3d083392008-05-12 21:20:42 +02001721
1722 p = kthread_run(ftraced, NULL, "ftraced");
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001723 if (IS_ERR(p)) {
1724 ret = -1;
1725 goto failed;
1726 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001727
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001728 last_ftrace_enabled = ftrace_enabled = 1;
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001729 ftraced_task = p;
Steven Rostedt3d083392008-05-12 21:20:42 +02001730
1731 return 0;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001732
1733 failed:
1734 ftrace_disabled = 1;
1735 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001736}
1737
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001738core_initcall(ftrace_dynamic_init);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001739#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1740
Steven Rostedt3d083392008-05-12 21:20:42 +02001741#else
Ingo Molnarc7aafc52008-05-12 21:20:45 +02001742# define ftrace_startup() do { } while (0)
1743# define ftrace_shutdown() do { } while (0)
1744# define ftrace_startup_sysctl() do { } while (0)
1745# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001746# define ftrace_force_shutdown() do { } while (0)
Steven Rostedt3d083392008-05-12 21:20:42 +02001747#endif /* CONFIG_DYNAMIC_FTRACE */
1748
1749/**
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001750 * ftrace_kill_atomic - kill ftrace from critical sections
1751 *
1752 * This function should be used by panic code. It stops ftrace
1753 * but in a not so nice way. If you need to simply kill ftrace
1754 * from a non-atomic section, use ftrace_kill.
1755 */
1756void ftrace_kill_atomic(void)
1757{
1758 ftrace_disabled = 1;
1759 ftrace_enabled = 0;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001760#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001761 ftraced_suspend = -1;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001762#endif
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001763 clear_ftrace_function();
1764}
1765
1766/**
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001767 * ftrace_kill - totally shutdown ftrace
1768 *
1769 * This is a safety measure. If something is detected that seems
1770 * wrong, calling this function will keep ftrace from making
1771 * any more modifications or updates. It is used when something
1772 * has gone wrong and ftrace needs to be shut down completely.
1773 */
1774void ftrace_kill(void)
1775{
1776 mutex_lock(&ftrace_sysctl_lock);
1777 ftrace_disabled = 1;
1778 ftrace_enabled = 0;
1779
1780 clear_ftrace_function();
1781 mutex_unlock(&ftrace_sysctl_lock);
1782
1783 /* Try to totally disable ftrace */
1784 ftrace_force_shutdown();
1785}
1786
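/*
 * Illustrative sketch only (not from the original source): choosing
 * between the two shutdown paths above. The helper name and the
 * in_interrupt()/irqs_disabled() check are assumptions for the example;
 * the kerneldoc only states that the atomic variant is meant for
 * panic/critical sections and the plain variant for normal context.
 */
#if 0
static void example_stop_tracing_on_error(void)
{
	if (in_interrupt() || irqs_disabled())
		ftrace_kill_atomic();	/* cannot take mutexes here */
	else
		ftrace_kill();		/* safe to sleep, do a full shutdown */
}
#endif
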
1787/**
Steven Rostedt3d083392008-05-12 21:20:42 +02001788 * register_ftrace_function - register a function for profiling
1789 * @ops: ops structure that holds the function for profiling.
1790 *
1791 * Register a function to be called by all functions in the
1792 * kernel.
1793 *
1794 * Note: @ops->func and all the functions it calls must be labeled
1795 * with "notrace", otherwise it will go into a
1796 * recursive loop.
1797 */
1798int register_ftrace_function(struct ftrace_ops *ops)
1799{
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001800 int ret;
1801
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001802 if (unlikely(ftrace_disabled))
1803 return -1;
1804
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001805 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001806 ret = __register_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001807 ftrace_startup();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001808 mutex_unlock(&ftrace_sysctl_lock);
1809
1810 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001811}
1812
1813/**
1814 * unregister_ftrace_function - unregister a function for profiling.
1815 * @ops: ops structure that holds the function to unregister
1816 *
1817 * Unregister a function that was added to be called by ftrace profiling.
1818 */
1819int unregister_ftrace_function(struct ftrace_ops *ops)
1820{
1821 int ret;
1822
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001823 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001824 ret = __unregister_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001825 ftrace_shutdown();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001826 mutex_unlock(&ftrace_sysctl_lock);
1827
1828 return ret;
1829}
1830
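/*
 * Illustrative sketch only (not from the original source): a minimal
 * user of register_ftrace_function()/unregister_ftrace_function(). The
 * callback and ops names are made up; the one hard requirement from the
 * kerneldoc above is that the callback and everything it calls be
 * marked notrace so it is never traced itself.
 */
#if 0
static void notrace example_trace_func(unsigned long ip,
				       unsigned long parent_ip)
{
	/*
	 * Called on entry of every traced function: ip is the callee,
	 * parent_ip the caller. Keep this path short and notrace.
	 */
}

static struct ftrace_ops example_trace_ops __read_mostly =
{
	.func = example_trace_func,
};

static int example_tracer_start(void)
{
	/* hook example_trace_func into the mcount call path */
	return register_ftrace_function(&example_trace_ops);
}

static void example_tracer_stop(void)
{
	/* detach the callback before its code can go away */
	unregister_ftrace_function(&example_trace_ops);
}
#endif
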
Ingo Molnare309b412008-05-12 21:20:51 +02001831int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001832ftrace_enable_sysctl(struct ctl_table *table, int write,
Steven Rostedt5072c592008-05-12 21:20:43 +02001833 struct file *file, void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001834 loff_t *ppos)
1835{
1836 int ret;
1837
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001838 if (unlikely(ftrace_disabled))
1839 return -ENODEV;
1840
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001841 mutex_lock(&ftrace_sysctl_lock);
1842
Steven Rostedt5072c592008-05-12 21:20:43 +02001843 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001844
1845 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1846 goto out;
1847
1848 last_ftrace_enabled = ftrace_enabled;
1849
1850 if (ftrace_enabled) {
1851
1852 ftrace_startup_sysctl();
1853
1854 /* we are starting ftrace again */
1855 if (ftrace_list != &ftrace_list_end) {
1856 if (ftrace_list->next == &ftrace_list_end)
1857 ftrace_trace_function = ftrace_list->func;
1858 else
1859 ftrace_trace_function = ftrace_list_func;
1860 }
1861
1862 } else {
1863 /* stopping ftrace calls (just send to ftrace_stub) */
1864 ftrace_trace_function = ftrace_stub;
1865
1866 ftrace_shutdown_sysctl();
1867 }
1868
1869 out:
1870 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001871 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001872}
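
/*
 * Illustrative sketch only (not from the original source): roughly how
 * the handler above is expected to be wired up as the
 * /proc/sys/kernel/ftrace_enabled knob. The table and entry below are
 * assumptions for the example; the actual entry lives in kernel/sysctl.c
 * and its exact fields may differ.
 */
#if 0
static struct ctl_table example_ftrace_sysctl[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "ftrace_enabled",
		.data		= &ftrace_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &ftrace_enable_sysctl,
	},
	{ }
};
#endif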