ftrace: add trace_function api for other tracers to use
A new check was added to the ftrace function so that it won't trace if the
per-CPU trace buffer is disabled. Unfortunately, other tracers used ftrace()
to write to the buffer after they had disabled it themselves. The new
disable check turns those calls into nops.
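
For context, the broken flow looked roughly like the following sketch
(simplified, not verbatim tracer code): the tracer bumps data->disabled as
reentrancy protection and then calls ftrace(), which now sees the non-zero
counter and silently records nothing:

disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))
	/*
	 * ftrace() now checks data->disabled, sees it is non-zero
	 * and returns without writing an entry.
	 */
	ftrace(tr, data, ip, parent_ip, flags);
atomic_dec(&data->disabled);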
This patch turns __ftrace(), the internal helper that performs no such
check, into a new API for the other tracers to use, called
"trace_function". The other tracers now call this interface directly when
the per-CPU trace buffer is already disabled.
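
A tracer that has already raised data->disabled can therefore record an
entry through the new interface, along the lines of the callers updated
below (minimal sketch of the intended usage pattern):

local_irq_save(flags);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))
	/*
	 * The buffer is guarded by the disabled count we just took,
	 * so write through trace_function() instead of ftrace().
	 */
	trace_function(tr, data, ip, parent_ip, flags);
atomic_dec(&data->disabled);
local_irq_restore(flags);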
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d041578..9022c35 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -641,8 +641,8 @@
}
notrace void
-__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
- unsigned long ip, unsigned long parent_ip, unsigned long flags)
+trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+ unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
struct trace_entry *entry;
unsigned long irq_flags;
@@ -664,7 +664,7 @@
unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
if (likely(!atomic_read(&data->disabled)))
- __ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
}
notrace void
@@ -730,7 +730,7 @@
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))
- __ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
atomic_dec(&data->disabled);
local_irq_restore(flags);