/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
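
/*
 * Registered ftrace_ops live on a singly linked list headed by ftrace_list
 * and terminated by ftrace_list_end.  ftrace_trace_function is what the
 * mcount entry code actually calls: ftrace_stub when nothing is registered,
 * the single ops->func when exactly one ops is registered, or
 * ftrace_list_func() to walk the whole chain.
 */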

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before the change is seen
 * on all CPUs.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should never be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

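/*
 * Command bits passed to ftrace_run_update_code(); they tell
 * __ftrace_modify_code() what to do while the machine is stopped.
 */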
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

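/*
 * Recorded mcount call sites are stored in page-sized blocks of dyn_ftrace
 * records.  The blocks are chained through ->next; new pages are added by
 * ftrace_dyn_table_alloc() at boot and by ftrace_shutdown_replenish() from
 * the ftraced daemon.
 */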
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

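/* Freed dyn_ftrace records are chained through their ->ip field. */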
static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

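/*
 * skip_trace() reports whether the call site at @ip must not be traced right
 * now: the record failed conversion, was never converted, tracing is off, or
 * filtering excludes it.  Only relevant while at least one record is frozen
 * by a kprobe.
 */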
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

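/*
 * Called via mcount while dynamic ftrace is recording: hash the caller's ip
 * and, if it has not been seen yet, queue a new dyn_ftrace record and set
 * ftraced_trigger so the ftraced daemon knows there is work to do.
 */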
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip))
				continue;

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

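/*
 * All code patching funnels through here: stop_machine_run() keeps every
 * other CPU idle so nothing executes an mcount call site while its
 * instruction bytes are being rewritten.
 */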
static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				continue;
			}

			/* convert record (i.e., patch the mcount call with a NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

	return 1;
}

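/*
 * The ftraced kernel thread wakes up once a second and, unless the daemon is
 * suspended or stopped, runs ftrace_update_code() under kstop_machine to
 * convert any newly recorded mcount call sites into nops.
 */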
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     (!(rec->flags & FTRACE_FL_FAILED) ||
		      (rec->flags & FTRACE_FL_FREE))) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
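
/*
 * Example (illustrative only): a hypothetical in-kernel user that only wants
 * the hrtimer code traced could clear the old filters and install a wildcard:
 *
 *	unsigned char glob[] = "hrtimer_*";
 *
 *	ftrace_set_filter(glob, sizeof(glob) - 1, 1);
 *
 * Note that ftrace_match() may write a NUL into the buffer to split the
 * wildcard, so the buffer passed in must be writable.
 */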

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

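/*
 * Create the user-visible control files in the tracing debugfs directory.
 * With debugfs mounted (typically on /sys/kernel/debug), for example
 * "echo sys_nanosleep > tracing/set_ftrace_filter" limits tracing to the
 * named functions, while "available_filter_functions" lists every function
 * that can currently be traced.
 */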
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications and updates.
 * It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
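
/*
 * Example (illustrative only): a hypothetical user supplies a notrace
 * callback and a struct ftrace_ops that wraps it, then registers it:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		(void)ip;
 *		(void)parent_ip;
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * and later tears it down with unregister_ftrace_function(&my_ops).
 */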

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}