/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);

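/*
 * Two per-cpu scratch buffers are kept, one for normal contexts and one
 * for NMI context, so that an NMI hitting in the middle of an event does
 * not scribble over the record being built underneath it. They are
 * allocated lazily when the first event is enabled and torn down again
 * when the last one is disabled (see total_ref_count below).
 */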
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

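/*
 * Enable perf reporting for one event. The per-event perf_refcount makes
 * repeated enables of the same event cheap; the global total_ref_count
 * decides when the shared per-cpu buffers must be allocated. If enabling
 * the event fails, the buffers are released again unless another event
 * already holds a reference to them.
 */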
static int perf_trace_event_enable(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->perf_refcount++ > 0)
		return 0;

	if (!total_ref_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->perf_event_enable(event);
	if (!ret) {
		total_ref_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_ref_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->perf_refcount--;

	return ret;
}

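/*
 * Called from the perf core when a tracepoint event is created: look up
 * the ftrace event by id under event_mutex, pin the module that provides
 * it and enable perf reporting for it.
 */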
int perf_trace_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->perf_event_enable &&
		    try_module_get(event->mod)) {
			ret = perf_trace_event_enable(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

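/*
 * Counterpart of perf_trace_event_enable(): drop the per-event reference
 * and, once the last event system-wide is gone, unpublish the buffers and
 * free them only after a grace period, so that handlers still running on
 * other CPUs cannot touch freed memory.
 */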
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->perf_refcount > 0)
		return;

	event->perf_event_disable(event);

	if (!--total_ref_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event currently being profiled has finished
		 * before releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

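/*
 * Counterpart of perf_trace_enable(): disable perf reporting for the
 * event and release the module reference taken at enable time.
 */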
void perf_trace_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			perf_trace_event_disable(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}

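/*
 * Reserve space for one event record in the per-cpu scratch buffer that
 * matches the current context (NMI or not), with interrupts disabled and
 * a recursion context held so the caller can safely fill it in.
 *
 * A sketch of the expected caller pattern (not a verbatim in-tree user;
 * it assumes the perf_trace_buf_submit() helper that pairs with this
 * function):
 *
 *	entry = perf_trace_buf_prepare(size, event_type, &rctx, &irq_flags);
 *	if (!entry)
 *		return;
 *	... fill in the payload that follows the struct trace_entry header ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, irq_flags, regs);
 *
 * The submit side hands the record to perf_tp_event() and undoes the
 * recursion context and interrupt disabling done here.
 */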
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per-cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from alignment to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);