/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

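/*
 * Per-context scratch buffers, used to build the raw sample payload
 * before handing it to perf. There is one buffer per recursion
 * context (task, softirq, hardirq, NMI) so that nested events do not
 * clobber each other's data.
 */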
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

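/*
 * Decide whether the calling task may attach a perf event to this
 * trace event. Raw samples can expose sensitive kernel state, so
 * they are gated on the perf paranoia level and CAP_SYS_ADMIN.
 */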
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event) &&
	    perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

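/*
 * Register a trace event for use by perf. The first perf event on a
 * given trace event allocates its per-cpu hlist of active events;
 * the first user overall also allocates the shared scratch buffers.
 * Later users just take a reference.
 */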
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

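/*
 * Drop a reference on the trace event. The last user tears down the
 * per-cpu hlist and, once no trace event is in use at all, frees the
 * shared scratch buffers.
 */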
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

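/*
 * TRACE_REG_PERF_OPEN/CLOSE give the event class a hook into the
 * creation and teardown of each individual perf event; classes that
 * don't care (most of them) simply return 0 from their reg() op.
 */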
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

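/*
 * Set up a single perf event on a trace event: permission check,
 * refcounted registration, then a per-event open. The registration
 * is undone if the open fails.
 */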
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

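/*
 * Entry point from the perf core. attr.config carries the trace
 * event id; look it up under event_mutex, pin the owning module and
 * initialize the event against it.
 */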
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

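/* Counterpart of perf_trace_init(): close, then drop the reference. */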
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

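/*
 * Schedule the event in on this CPU: hook it into the trace event's
 * per-cpu list so the tracepoint callback will see it.
 */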
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

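/* Schedule the event out: unhook it from the per-cpu list. */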
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

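/*
 * Grab this CPU's scratch buffer for the current recursion context
 * and fill in the common trace entry header. Returns NULL, without
 * holding a recursion context, if we are already nested within
 * ourselves.
 */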
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

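/*
 * Callers pair perf_trace_buf_prepare() with perf_trace_buf_submit().
 * A minimal sketch of a tracepoint perf handler, modeled on the code
 * generated from include/trace/ftrace.h (the entry struct and its
 * field are illustrative, not a real event):
 *
 *	struct my_trace_entry *entry;
 *	struct pt_regs regs;
 *	int rctx;
 *	int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) -
 *		   sizeof(u32);
 *
 *	perf_fetch_caller_regs(&regs);
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       &regs, &rctx);
 *	if (!entry)
 *		return;
 *	entry->my_field = val;
 *	perf_trace_buf_submit(entry, size, rctx, 0, 1, &regs, head, NULL);
 */
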
#ifdef CONFIG_FUNCTION_TRACER
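/*
 * Function-trace callback: build a TRACE_FN entry in the scratch
 * buffer and submit it to perf as a raw sample.
 */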
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;

	head = this_cpu_ptr(event_function.perf_events);
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

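/*
 * Each function-trace perf event carries its own ftrace_ops. The
 * CONTROL flag lets perf_trace_add/del enable and disable the ops on
 * a per-cpu basis without a full (un)registration.
 */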
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

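/*
 * reg() callback for the function trace event: map the generic
 * trace_reg operations onto the ftrace_ops helpers above.
 */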
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */