/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before every CPU stops
 * calling the previous function, as nothing here waits
 * for in-flight callers.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

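/*
 * Illustrative sketch (not part of the original file): how a tracer
 * plugs into the list above. The callback name and the use of
 * register_ftrace_function()/unregister_ftrace_function() -- the
 * public wrappers declared in <linux/ftrace.h> -- are assumptions for
 * the example; only struct ftrace_ops and the callback signature are
 * taken from the code above.
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called for every traced function: ip is the callee,
	 * parent_ip is its caller */
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
	.func = my_trace_func,
};

/* in tracer init/exit code:
 *	register_ftrace_function(&my_trace_ops);
 *	unregister_ftrace_function(&my_trace_ops);
 */
#endif
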
154#ifdef CONFIG_DYNAMIC_FTRACE
155
Steven Rostedte1c08bd2008-05-12 21:20:44 +0200156static struct task_struct *ftraced_task;
Steven Rostedte1c08bd2008-05-12 21:20:44 +0200157
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200158enum {
159 FTRACE_ENABLE_CALLS = (1 << 0),
160 FTRACE_DISABLE_CALLS = (1 << 1),
161 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
162 FTRACE_ENABLE_MCOUNT = (1 << 3),
163 FTRACE_DISABLE_MCOUNT = (1 << 4),
164};
165
Steven Rostedt5072c592008-05-12 21:20:43 +0200166static int ftrace_filtered;
Abhishek Sagarecea6562008-06-21 23:47:53 +0530167static int tracing_on;
168static int frozen_record_count;
Steven Rostedt5072c592008-05-12 21:20:43 +0200169
Steven Rostedt3d083392008-05-12 21:20:42 +0200170static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
171
172static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
173
174static DEFINE_SPINLOCK(ftrace_shutdown_lock);
175static DEFINE_MUTEX(ftraced_lock);
Steven Rostedt41c52c02008-05-22 11:46:33 -0400176static DEFINE_MUTEX(ftrace_regex_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +0200177
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200178struct ftrace_page {
179 struct ftrace_page *next;
David Milleraa5e5ce2008-05-13 22:06:56 -0700180 unsigned long index;
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200181 struct dyn_ftrace records[];
David Milleraa5e5ce2008-05-13 22:06:56 -0700182};
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200183
184#define ENTRIES_PER_PAGE \
185 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
186
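/*
 * For example, with 4 KB pages this works out to on the order of a
 * hundred dyn_ftrace records per page; the exact count depends on
 * sizeof(struct dyn_ftrace) for the architecture.
 */
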
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

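/*
 * skip_trace - report whether a hit on @ip should be ignored.
 *
 * Returns non-zero when the record for @ip is frozen (its mcount site
 * is currently covered by a kprobe) and is in a state where calling
 * into the tracer would be wrong: the record has failed or has not
 * been converted, tracing is off, or the filter excludes it.
 */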
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (!start)
		return;

	/* No interrupt should call this */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}

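/*
 * A note on the patterns accepted above (derived from the parser in
 * ftrace_match()): "foo" matches exactly, "foo*" matches a prefix,
 * "*foo" matches a suffix and "*foo*" matches a substring. Only one
 * wildcard pair is understood; anything after an embedded '*' is
 * cut off.
 */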
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

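/*
 * Illustrative sketch (not part of the original file): how the two
 * helpers above might be used from built-in code. The function name
 * is hypothetical; the patterns follow the wildcard rules of
 * ftrace_match().
 */
#if 0
static void example_restrict_tracing(void)
{
	/* reset old filters, then trace only functions starting with "sched" */
	ftrace_set_filter((unsigned char *)"sched*", strlen("sched*"), 1);

	/* additionally, never trace anything containing "preempt" */
	ftrace_set_notrace((unsigned char *)"*preempt*", strlen("*preempt*"), 1);
}
#endif
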
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

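/*
 * Illustrative usage from user space (assuming debugfs is mounted at
 * /sys/kernel/debug; older setups often used /debug instead):
 *
 *	echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 *	echo enable  > /sys/kernel/debug/tracing/ftraced_enabled
 *
 * ftraced_write() above also accepts "0"/"1" via strict_strtoul().
 */
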
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (start == end)
		return;
	ftrace_convert_nops(start, end);
}

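/*
 * __start_mcount_loc and __stop_mcount_loc bound the __mcount_loc
 * section into which the build records every mcount call site; the
 * symbols themselves come from the linker script.
 */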
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001575extern unsigned long __start_mcount_loc[];
1576extern unsigned long __stop_mcount_loc[];
1577
1578void __init ftrace_init(void)
1579{
1580 unsigned long count, addr, flags;
1581 int ret;
1582
1583 /* Keep the ftrace pointer to the stub */
1584 addr = (unsigned long)ftrace_stub;
1585
1586 local_irq_save(flags);
1587 ftrace_dyn_arch_init(&addr);
1588 local_irq_restore(flags);
1589
1590 /* ftrace_dyn_arch_init places the return code in addr */
1591 if (addr)
1592 goto failed;
1593
1594 count = __stop_mcount_loc - __start_mcount_loc;
1595
1596 ret = ftrace_dyn_table_alloc(count);
1597 if (ret)
1598 goto failed;
1599
1600 last_ftrace_enabled = ftrace_enabled = 1;
1601
1602 ret = ftrace_convert_nops(__start_mcount_loc,
1603 __stop_mcount_loc);
1604
1605 return;
1606 failed:
1607 ftrace_disabled = 1;
1608}
1609#else /* CONFIG_FTRACE_MCOUNT_RECORD */
1610static int ftraced(void *ignore)
1611{
1612 unsigned long usecs;
1613
1614 while (!kthread_should_stop()) {
1615
1616 set_current_state(TASK_INTERRUPTIBLE);
1617
1618 /* check once a second */
1619 schedule_timeout(HZ);
1620
1621 if (unlikely(ftrace_disabled))
1622 continue;
1623
1624 mutex_lock(&ftrace_sysctl_lock);
1625 mutex_lock(&ftraced_lock);
1626 if (!ftraced_suspend && !ftraced_stop &&
1627 ftrace_update_code()) {
1628 usecs = nsecs_to_usecs(ftrace_update_time);
1629 if (ftrace_update_tot_cnt > 100000) {
1630 ftrace_update_tot_cnt = 0;
1631 pr_info("hm, dftrace overflow: %lu change%s"
1632 " (%lu total) in %lu usec%s\n",
1633 ftrace_update_cnt,
1634 ftrace_update_cnt != 1 ? "s" : "",
1635 ftrace_update_tot_cnt,
1636 usecs, usecs != 1 ? "s" : "");
1637 ftrace_disabled = 1;
1638 WARN_ON_ONCE(1);
1639 }
1640 }
1641 mutex_unlock(&ftraced_lock);
1642 mutex_unlock(&ftrace_sysctl_lock);
1643
1644 ftrace_shutdown_replenish();
1645 }
1646 __set_current_state(TASK_RUNNING);
1647 return 0;
1648}
1649
Ingo Molnare309b412008-05-12 21:20:51 +02001650static int __init ftrace_dynamic_init(void)
Steven Rostedt3d083392008-05-12 21:20:42 +02001651{
1652 struct task_struct *p;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001653 unsigned long addr;
Steven Rostedt3d083392008-05-12 21:20:42 +02001654 int ret;
1655
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001656 addr = (unsigned long)ftrace_record_ip;
Ingo Molnar9ff9cdb2008-05-12 21:20:50 +02001657
Rusty Russell784e2d72008-07-28 12:16:31 -05001658 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001659
1660 /* ftrace_dyn_arch_init places the return code in addr */
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001661 if (addr) {
1662 ret = (int)addr;
1663 goto failed;
1664 }
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001665
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001666 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
Steven Rostedt3d083392008-05-12 21:20:42 +02001667 if (ret)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001668 goto failed;
Steven Rostedt3d083392008-05-12 21:20:42 +02001669
1670 p = kthread_run(ftraced, NULL, "ftraced");
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001671 if (IS_ERR(p)) {
1672 ret = -1;
1673 goto failed;
1674 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001675
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001676 last_ftrace_enabled = ftrace_enabled = 1;
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001677 ftraced_task = p;
Steven Rostedt3d083392008-05-12 21:20:42 +02001678
1679 return 0;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001680
1681 failed:
1682 ftrace_disabled = 1;
1683 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001684}
1685
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001686core_initcall(ftrace_dynamic_init);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001687#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1688
Steven Rostedt3d083392008-05-12 21:20:42 +02001689#else
Ingo Molnarc7aafc52008-05-12 21:20:45 +02001690# define ftrace_startup() do { } while (0)
1691# define ftrace_shutdown() do { } while (0)
1692# define ftrace_startup_sysctl() do { } while (0)
1693# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001694# define ftrace_force_shutdown() do { } while (0)
Steven Rostedt3d083392008-05-12 21:20:42 +02001695#endif /* CONFIG_DYNAMIC_FTRACE */
1696
1697/**
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001698 * ftrace_kill_atomic - kill ftrace from critical sections
1699 *
1700 * This function should be used by panic code. It stops ftrace
1701 * abruptly, without taking the usual locks. If you need to kill
1702 * ftrace from a non-atomic section, use ftrace_kill() instead.
1703 */
1704void ftrace_kill_atomic(void)
1705{
1706 ftrace_disabled = 1;
1707 ftrace_enabled = 0;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001708#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001709 ftraced_suspend = -1;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001710#endif
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001711 clear_ftrace_function();
1712}
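/*
 * Sketch of a hypothetical atomic-context caller (not part of this
 * file): a panic notifier that shuts tracing down before machine state
 * is dumped.  'my_panic_event' and 'my_panic_block' are illustrative
 * names only; the block would be registered on panic_notifier_list.
 */
#if 0	/* illustrative only */
#include <linux/notifier.h>

static int my_panic_event(struct notifier_block *nb, unsigned long event,
			  void *unused)
{
	ftrace_kill_atomic();	/* takes no locks, safe in this context */
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_block = {
	.notifier_call = my_panic_event,
};
#endif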
1713
1714/**
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001715 * ftrace_kill - totally shutdown ftrace
1716 *
1717 * This is a safety measure. If something that seems wrong is
1718 * detected, calling this function keeps ftrace from making
1719 * any further modifications or updates.
1720 * It is used when something has gone wrong.
1721 */
1722void ftrace_kill(void)
1723{
1724 mutex_lock(&ftrace_sysctl_lock);
1725 ftrace_disabled = 1;
1726 ftrace_enabled = 0;
1727
1728 clear_ftrace_function();
1729 mutex_unlock(&ftrace_sysctl_lock);
1730
1731 /* Try to totally disable ftrace */
1732 ftrace_force_shutdown();
1733}
1734
1735/**
Steven Rostedt3d083392008-05-12 21:20:42 +02001736 * register_ftrace_function - register a function for profiling
1737 * @ops: ops structure that holds the function for profiling.
1738 *
1739 * Register a function to be called by all functions in the
1740 * kernel.
1741 *
1742 * Note: @ops->func and all the functions it calls must be labeled
1743 * with "notrace", otherwise it will go into a
1744 * recursive loop.
1745 */
1746int register_ftrace_function(struct ftrace_ops *ops)
1747{
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001748 int ret;
1749
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001750 if (unlikely(ftrace_disabled))
1751 return -1;
1752
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001753 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001754 ret = __register_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001755 ftrace_startup();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001756 mutex_unlock(&ftrace_sysctl_lock);
1757
1758 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001759}
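/*
 * Sketch of a minimal user of this interface (not part of this file).
 * It registers a callback for every traced function and removes it on
 * module unload.  The names (my_trace_func, my_ops, ...) are
 * hypothetical; note that the callback is marked notrace, as required
 * by the comment above, so it is not itself traced and cannot recurse.
 */
#if 0	/* illustrative only */
#include <linux/module.h>
#include <linux/ftrace.h>

static notrace void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip is its call site */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");
#endif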
1760
1761/**
1762 * unregister_ftrace_function - unregister a function for profiling.
1763 * @ops: ops structure that holds the function to unregister
1764 *
1765 * Unregister a function that was added to be called by ftrace profiling.
1766 */
1767int unregister_ftrace_function(struct ftrace_ops *ops)
1768{
1769 int ret;
1770
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001771 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001772 ret = __unregister_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001773 ftrace_shutdown();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001774 mutex_unlock(&ftrace_sysctl_lock);
1775
1776 return ret;
1777}
1778
Ingo Molnare309b412008-05-12 21:20:51 +02001779int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001780ftrace_enable_sysctl(struct ctl_table *table, int write,
Steven Rostedt5072c592008-05-12 21:20:43 +02001781 struct file *file, void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001782 loff_t *ppos)
1783{
1784 int ret;
1785
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001786 if (unlikely(ftrace_disabled))
1787 return -ENODEV;
1788
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001789 mutex_lock(&ftrace_sysctl_lock);
1790
Steven Rostedt5072c592008-05-12 21:20:43 +02001791 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001792
1793 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1794 goto out;
1795
1796 last_ftrace_enabled = ftrace_enabled;
1797
1798 if (ftrace_enabled) {
1799
1800 ftrace_startup_sysctl();
1801
1802 /* we are starting ftrace again */
1803 if (ftrace_list != &ftrace_list_end) {
1804 if (ftrace_list->next == &ftrace_list_end)
1805 ftrace_trace_function = ftrace_list->func;
1806 else
1807 ftrace_trace_function = ftrace_list_func;
1808 }
1809
1810 } else {
1811 /* stopping ftrace calls (just send to ftrace_stub) */
1812 ftrace_trace_function = ftrace_stub;
1813
1814 ftrace_shutdown_sysctl();
1815 }
1816
1817 out:
1818 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001819 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001820}
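/*
 * Illustrative usage of the handler above (not part of the original
 * source), assuming it is wired up elsewhere (conventionally in
 * kernel/sysctl.c) as the kernel.ftrace_enabled sysctl:
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled	(stop function tracing)
 *	# echo 1 > /proc/sys/kernel/ftrace_enabled	(start it again)
 */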