/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

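/*
 * Call-path overview: the per-arch mcount stub dispatches through
 * ftrace_trace_function.  When a single ftrace_ops is registered its
 * ->func is installed there directly; with more than one registered,
 * ftrace_list_func() below walks the ftrace_list chain and calls each
 * ->func in turn.
 */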
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

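/*
 * The read_barrier_depends() calls above pair with the smp_wmb() in
 * __register_ftrace_function(): a new ops has its ->next pointer made
 * visible before the ops itself is linked into ftrace_list, so the
 * lockless walk above never follows an uninitialized ->next.
 */
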
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag before all CPUs stop calling
 * the previously registered functions.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
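/*
 * The command bits above are OR'd together and handed to
 * ftrace_run_update_code(), which runs __ftrace_modify_code() under
 * stop_machine_run() so call sites can be patched while no other CPU
 * is executing kernel code.
 */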

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
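/*
 * Records of traced call sites are kept in a simple linked list of
 * page-sized pools: each struct ftrace_page header is followed by as
 * many struct dyn_ftrace records as fit in the remainder of the page
 * (ENTRIES_PER_PAGE).
 */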

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	if (!ftrace_enabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
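/*
 * ftrace_record_ip() above is the handler installed at the arch mcount
 * hook (via ftrace_mcount_set()) while call sites are being discovered:
 * it hashes each new ip into ftrace_hash and sets ftraced_trigger so
 * the ftraced kthread will later convert the call site to a nop.
 */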

#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}

static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}

static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
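/*
 * ftrace_run_update_code() executes the requested command bits inside
 * stop_machine_run(): with every other CPU parked in the stopper
 * thread, kernel text can be rewritten without any CPU executing a
 * half-modified instruction.
 */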

static ftrace_func_t saved_ftrace_func;

static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}

	}

	stop = now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
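/*
 * The ftraced kthread above wakes once a second; whenever new call
 * sites have been hashed (ftraced_trigger) and tracing is not currently
 * active (ftraced_suspend == 0) it converts them to nops under
 * stop_machine and then tops up the record page pool with
 * ftrace_shutdown_replenish().
 */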

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;
	} else
		kfree(iter);

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
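/*
 * ftrace_match() below implements the simple glob syntax accepted by
 * set_ftrace_filter:
 *
 *	foo	MATCH_FULL        - the symbol must equal "foo"
 *	foo*	MATCH_FRONT_ONLY  - the symbol starts with "foo"
 *	*foo	MATCH_END_ONLY    - the symbol ends with "foo"
 *	*foo*	MATCH_MIDDLE_ONLY - the symbol contains "foo"
 */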

static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);
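/*
 * Typical use of the files created above (paths assume the tracing
 * directory lives under the usual debugfs mount point):
 *
 *	cat  /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * Writing a name or glob limits tracing to the matching functions.
 * Opening the filter file for writing without O_APPEND (as the shell's
 * '>' does) clears the previous filter first, while '>>' appends.
 */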

static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	last_ftrace_enabled = ftrace_enabled = 1;

	return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
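/*
 * Minimal usage sketch (hypothetical tracer, not part of this file).
 * The callback and everything it calls must be notrace, as the kerneldoc
 * above register_ftrace_function() warns:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...	record ip/parent_ip somewhere safe
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */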

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}