/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

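/*
 * Per-cpu scratch buffers used to build an event record before it is
 * handed to perf. One buffer per recursion context (task, softirq,
 * hardirq, NMI), indexed by the rctx value returned from
 * perf_swevent_get_recursion_context().
 */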
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

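/*
 * First-time setup when a perf event attaches to this trace event:
 * allocate the per-cpu hlist of attached perf events, allocate the
 * shared scratch buffers if no trace event was in use by perf yet,
 * and register the perf variant of the tracepoint probe.
 */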
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

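/*
 * Entry point from the perf core for tracepoint events: find the trace
 * event whose id matches attr.config and initialize it for perf use,
 * keeping a reference on the module that provides the event.
 */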
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

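/*
 * Enable/disable simply add or remove the perf event on the per-cpu
 * hlist that the perf probe walks when the tracepoint fires, so only
 * events scheduled in on this CPU are fed the record.
 */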
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = this_cpu_ptr(list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

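/*
 * Last-user teardown: unregister the perf probe, wait for in-flight
 * probes to finish, then free the per-cpu hlist and, once no trace
 * event is used by perf at all, the shared scratch buffers.
 */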
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
	mutex_unlock(&event_mutex);
}

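/*
 * Grab the scratch buffer for the current CPU and recursion context and
 * fill in the generic trace entry header. Returns the buffer with
 * *rctxp set to the recursion context the caller must hand back on
 * submit, or NULL if this context is already busy (recursion).
 */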
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
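
/*
 * Illustrative caller sketch (not part of this file): the perf probes
 * generated from TRACE_EVENT() pair perf_trace_buf_prepare() with
 * perf_trace_buf_submit() roughly along these lines; the local names
 * below (__entry_size, rctx, head) are only meant as an example:
 *
 *	entry = perf_trace_buf_prepare(__entry_size,
 *				       event_call->event.type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	... assign the event-specific fields of *entry ...
 *	perf_trace_buf_submit(entry, __entry_size, rctx, addr, count,
 *			      regs, head);
 */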