/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

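/*
 * Per-cpu scratch buffers the profiled events build their raw records
 * in before handing them to perf.  A separate buffer is kept for NMI
 * context so that an NMI cannot corrupt a record being assembled in
 * normal context on the same cpu.  Both pointers are published with
 * rcu_assign_pointer() and only freed after a synchronize_sched(),
 * see the enable/disable paths below.
 */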
struct perf_trace_buf *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);

struct perf_trace_buf *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

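/*
 * Enable profiling for one event.  event->profile_count makes this
 * reference counted per event, so only the first counter attached to
 * the event does the real work; the shared buffers above are only
 * allocated when the first event system-wide starts profiling.
 */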
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	struct perf_trace_buf *buf;
	int ret = -ENOMEM;

	if (atomic_inc_return(&event->profile_count))
		return 0;

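	/*
	 * First event profiled system-wide: allocate the regular and
	 * NMI per-cpu buffers and publish them under RCU.
	 */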
	if (!total_profile_count) {
		buf = alloc_percpu(struct perf_trace_buf);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = alloc_percpu(struct perf_trace_buf);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

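	/*
	 * Error unwind: free the buffers only if nobody else is
	 * profiling, then drop the reference taken above.
	 */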
fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	atomic_dec(&event->profile_count);

	return ret;
}

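/*
 * Called by the perf core to start profiling the trace event with the
 * given id.  The event's module is pinned here and released again in
 * ftrace_profile_disable().
 */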
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

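/*
 * Drop one profiling reference on the event.  The last reference
 * disables the event, and once no event is profiling at all the
 * shared buffers are unpublished and freed.
 */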
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	struct perf_trace_buf *buf, *nmi_buf;

	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event still profiling has finished
		 * before releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

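/*
 * Counterpart of ftrace_profile_enable(): disable profiling for the
 * event with the given id and drop the module reference taken at
 * enable time.
 */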
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}