/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

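/*
 * perf_trace_buf and perf_trace_buf_nmi are per-cpu scratch buffers of
 * FTRACE_MAX_PROFILE_SIZE bytes each, allocated when the first event
 * starts profiling and freed, after an RCU grace period, when the last
 * one stops. NMI context gets its own buffer so that an NMI firing while
 * an entry is being built cannot corrupt the regular buffer.
 */
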
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->profile_count++ > 0)
		return 0;

	if (!total_profile_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	/*
	 * We failed before bumping total_profile_count: if it is still
	 * zero, no enabled event can be using the buffers, so release them.
	 */
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->profile_count--;

	return ret;
}

int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->profile_count > 0)
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event currently in a profiling path has
		 * finished before releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}

__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/*
	 * Disabling irqs protects the per-cpu buffer and begins the
	 * RCU read side for dereferencing it below.
	 */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	/* NMIs use their own buffer so they cannot corrupt an entry in progress */
	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from the u64 alignment so no stack data leaks to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
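
/*
 * Usage sketch (illustrative, not part of this file): the TRACE_EVENT
 * machinery generates per-event handlers that pair ftrace_perf_buf_prepare()
 * with the companion ftrace_perf_buf_submit() helper from the same patch
 * series. The handler shape, the struct and field names, and the ALIGN()
 * rounding below are assumptions for illustration, not generated code
 * copied verbatim:
 *
 *	static void perf_trace_foo(struct foo_args *args)
 *	{
 *		struct ftrace_raw_foo *entry;	// hypothetical raw-entry layout
 *		unsigned long irq_flags;
 *		int entry_size, rctx;
 *
 *		// round up to a u64 multiple so the trailing-word zeroing
 *		// in ftrace_perf_buf_prepare() stays inside the entry
 *		entry_size = ALIGN(sizeof(*entry), sizeof(u64));
 *
 *		entry = ftrace_perf_buf_prepare(entry_size, event_foo.id,
 *						&rctx, &irq_flags);
 *		if (!entry)
 *			return;
 *
 *		entry->field = args->field;	// fill event-specific fields
 *
 *		// hands the entry to perf, then undoes the recursion context
 *		// and irq state taken in prepare()
 *		ftrace_perf_buf_submit(entry, entry_size, rctx, 0, 1,
 *				       &irq_flags);
 *	}
 */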