/*
 * Performance counter core code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;

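/*
 * On the three counts above: nr_counters tracks all live counters,
 * while nr_mmap_counters and nr_comm_counters track how many of them
 * asked for mmap and comm side-band events, so those record paths can
 * be skipped entirely while the counts are zero.
 */
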
/*
 * perf counter paranoia level:
 *  0 - not paranoid
 *  1 - disallow cpu counters for unprivileged users
 *  2 - disallow kernel profiling for unprivileged users
 */
int sysctl_perf_counter_paranoid __read_mostly;

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_counter_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_counter_paranoid > 1;
}
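
/*
 * A minimal sketch of how the helpers above are meant to be consumed
 * at a counter-creation call site (the exact call site lives elsewhere
 * in this file and is assumed here):
 *
 *	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 *		return ERR_PTR(-EACCES);
 */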
|  | 63 |  | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 64 | int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ | 
| Peter Zijlstra | df58ab2 | 2009-06-11 11:25:05 +0200 | [diff] [blame] | 65 |  | 
|  | 66 | /* | 
|  | 67 | * max perf counter sample rate | 
|  | 68 | */ | 
|  | 69 | int sysctl_perf_counter_sample_rate __read_mostly = 100000; | 
| Peter Zijlstra | 1ccd154 | 2009-04-09 10:53:45 +0200 | [diff] [blame] | 70 |  | 
| Peter Zijlstra | a96bbc1 | 2009-06-03 14:01:36 +0200 | [diff] [blame] | 71 | static atomic64_t perf_counter_id; | 
|  | 72 |  | 
/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}
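
/*
 * Note on nesting: disable_count is a per-cpu depth counter, so
 * perf_disable()/perf_enable() pairs nest and the hardware is only
 * re-enabled when the outermost perf_enable() brings the depth back
 * to zero.  The usual pattern in this file is to bracket list
 * manipulation, roughly:
 *
 *	perf_disable();
 *	... modify counter lists / reprogram counters ...
 *	perf_enable();
 */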

static void get_ctx(struct perf_counter_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}
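
/*
 * Context lifetime, in short: get_ctx() refuses to resurrect a context
 * whose refcount has already hit zero (hence the inc_not_zero), and
 * put_ctx() frees via call_rcu() so that lockless readers under
 * rcu_read_lock() - such as perf_lock_task_context() below - can never
 * see the memory disappear under them.  Dropping the last reference
 * also releases the references pinning the parent context and task.
 */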

static void unclone_ctx(struct perf_counter_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit counters we want to return the parent counter id
 * to userspace.
 */
static u64 primary_counter_id(struct perf_counter *counter)
{
	u64 id = counter->id;

	if (counter->parent)
		id = counter->parent->id;

	return id;
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}
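
/*
 * On success the context comes back locked (with *flags holding the
 * saved irq state) and with an extra reference held; the caller is
 * responsible for both the unlock and the eventual put_ctx().
 * perf_pin_task_context() below is the canonical caller.
 */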

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}
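
/*
 * pin_count and refcount guard different things: the reference count
 * keeps the context memory alive, while a non-zero pin_count keeps the
 * context attached to its task (context_equiv() below refuses to treat
 * pinned contexts as swappable clones).
 */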

/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
	if (counter->attr.inherit_stat)
		ctx->nr_stat++;
}
|  | 264 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 265 | /* | 
|  | 266 | * Remove a counter from the lists for its context. | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 267 | * Must be called with ctx->mutex and ctx->lock held. | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 268 | */ | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 269 | static void | 
|  | 270 | list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | 
|  | 271 | { | 
|  | 272 | struct perf_counter *sibling, *tmp; | 
|  | 273 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 274 | if (list_empty(&counter->list_entry)) | 
|  | 275 | return; | 
| Peter Zijlstra | 8bc2095 | 2009-05-15 20:45:59 +0200 | [diff] [blame] | 276 | ctx->nr_counters--; | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 277 | if (counter->attr.inherit_stat) | 
|  | 278 | ctx->nr_stat--; | 
| Peter Zijlstra | 8bc2095 | 2009-05-15 20:45:59 +0200 | [diff] [blame] | 279 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 280 | list_del_init(&counter->list_entry); | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 281 | list_del_rcu(&counter->event_entry); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 282 |  | 
| Peter Zijlstra | 5c14819 | 2009-03-25 12:30:23 +0100 | [diff] [blame] | 283 | if (counter->group_leader != counter) | 
|  | 284 | counter->group_leader->nr_siblings--; | 
|  | 285 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 286 | /* | 
|  | 287 | * If this was a group counter with sibling counters then | 
|  | 288 | * upgrade the siblings to singleton counters by adding them | 
|  | 289 | * to the context list directly: | 
|  | 290 | */ | 
|  | 291 | list_for_each_entry_safe(sibling, tmp, | 
|  | 292 | &counter->sibling_list, list_entry) { | 
|  | 293 |  | 
| Peter Zijlstra | 7556423 | 2009-03-13 12:21:29 +0100 | [diff] [blame] | 294 | list_move_tail(&sibling->list_entry, &ctx->counter_list); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 295 | sibling->group_leader = sibling; | 
|  | 296 | } | 
|  | 297 | } | 
|  | 298 |  | 
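/*
 * Deschedule one counter: mark it inactive, record when it stopped,
 * let the pmu turn it off and undo the active/exclusive accounting
 * that counter_sched_in() established.
 */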
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->attr.exclusive)
		cpuctx->exclusive = 0;
}
|  | 340 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 341 | /* | 
|  | 342 | * Cross CPU call to remove a performance counter | 
|  | 343 | * | 
|  | 344 | * We disable the counter on the hardware level first. After that we | 
|  | 345 | * remove it from the context list. | 
|  | 346 | */ | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 347 | static void __perf_counter_remove_from_context(void *info) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 348 | { | 
|  | 349 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 
|  | 350 | struct perf_counter *counter = info; | 
|  | 351 | struct perf_counter_context *ctx = counter->ctx; | 
|  | 352 |  | 
|  | 353 | /* | 
|  | 354 | * If this is a task context, we need to check whether it is | 
|  | 355 | * the current task context of this cpu. If not it has been | 
|  | 356 | * scheduled out before the smp call arrived. | 
|  | 357 | */ | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 358 | if (ctx->task && cpuctx->task_ctx != ctx) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 359 | return; | 
|  | 360 |  | 
| Ingo Molnar | 3f4dee2 | 2009-05-29 11:25:09 +0200 | [diff] [blame] | 361 | spin_lock(&ctx->lock); | 
| Ingo Molnar | 34adc80 | 2009-05-20 20:13:28 +0200 | [diff] [blame] | 362 | /* | 
|  | 363 | * Protect the list operation against NMI by disabling the | 
|  | 364 | * counters on a global level. | 
|  | 365 | */ | 
|  | 366 | perf_disable(); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 367 |  | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 368 | counter_sched_out(counter, cpuctx, ctx); | 
|  | 369 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 370 | list_del_counter(counter, ctx); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 371 |  | 
|  | 372 | if (!ctx->task) { | 
|  | 373 | /* | 
|  | 374 | * Allow more per task counters with respect to the | 
|  | 375 | * reservation: | 
|  | 376 | */ | 
|  | 377 | cpuctx->max_pertask = | 
|  | 378 | min(perf_max_counters - ctx->nr_counters, | 
|  | 379 | perf_max_counters - perf_reserved_percpu); | 
|  | 380 | } | 
|  | 381 |  | 
| Ingo Molnar | 34adc80 | 2009-05-20 20:13:28 +0200 | [diff] [blame] | 382 | perf_enable(); | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 383 | spin_unlock(&ctx->lock); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 384 | } | 

/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so
	 * we can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}
|  | 441 |  | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 442 | static inline u64 perf_clock(void) | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 443 | { | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 444 | return cpu_clock(smp_processor_id()); | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 445 | } | 
|  | 446 |  | 
|  | 447 | /* | 
|  | 448 | * Update the record of the current time in a context. | 
|  | 449 | */ | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 450 | static void update_context_time(struct perf_counter_context *ctx) | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 451 | { | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 452 | u64 now = perf_clock(); | 
|  | 453 |  | 
|  | 454 | ctx->time += now - ctx->timestamp; | 
|  | 455 | ctx->timestamp = now; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 456 | } | 
|  | 457 |  | 
|  | 458 | /* | 
|  | 459 | * Update the total_time_enabled and total_time_running fields for a counter. | 
|  | 460 | */ | 
|  | 461 | static void update_counter_times(struct perf_counter *counter) | 
|  | 462 | { | 
|  | 463 | struct perf_counter_context *ctx = counter->ctx; | 
|  | 464 | u64 run_end; | 
|  | 465 |  | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 466 | if (counter->state < PERF_COUNTER_STATE_INACTIVE) | 
|  | 467 | return; | 
|  | 468 |  | 
|  | 469 | counter->total_time_enabled = ctx->time - counter->tstamp_enabled; | 
|  | 470 |  | 
|  | 471 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) | 
|  | 472 | run_end = counter->tstamp_stopped; | 
|  | 473 | else | 
|  | 474 | run_end = ctx->time; | 
|  | 475 |  | 
|  | 476 | counter->total_time_running = run_end - counter->tstamp_running; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 477 | } | 
|  | 478 |  | 
|  | 479 | /* | 
|  | 480 | * Update total_time_enabled and total_time_running for all counters in a group. | 
|  | 481 | */ | 
|  | 482 | static void update_group_times(struct perf_counter *leader) | 
|  | 483 | { | 
|  | 484 | struct perf_counter *counter; | 
|  | 485 |  | 
|  | 486 | update_counter_times(leader); | 
|  | 487 | list_for_each_entry(counter, &leader->sibling_list, list_entry) | 
|  | 488 | update_counter_times(counter); | 
|  | 489 | } | 
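
/*
 * Worked example of the bookkeeping above: a counter added at
 * ctx->time 100 (all three tstamps set to 100) that runs until it is
 * scheduled out at 130 and is then inspected at 150 reports
 * total_time_enabled = 150 - 100 = 50 and total_time_running =
 * tstamp_stopped - tstamp_running = 130 - 100 = 30.  If it gets
 * scheduled back in, counter_sched_in() advances tstamp_running by
 * the stopped gap, so only time spent ACTIVE counts as running.
 */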

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

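/*
 * Schedule a whole counter group in.  A non-zero return from
 * hw_perf_group_sched_in() means the architecture code handled (or
 * failed) the whole group itself: negative values are errors, positive
 * values are reported as success, and only zero falls through to the
 * generic counter-by-counter path below.
 */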
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}
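
/*
 * Starting all three timestamps at the current context time means a
 * freshly added counter reports zero for both total_time_enabled and
 * total_time_running until something actually happens to it.
 */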

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non-NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active and the counter has not been added,
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so
	 * we can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
out:
	spin_unlock_irq(&ctx->lock);
}

| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 977 | static int perf_counter_refresh(struct perf_counter *counter, int refresh) | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 978 | { | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 979 | /* | 
|  | 980 | * not supported on inherited counters | 
|  | 981 | */ | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 982 | if (counter->attr.inherit) | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 983 | return -EINVAL; | 
|  | 984 |  | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 985 | atomic_add(refresh, &counter->event_limit); | 
|  | 986 | perf_counter_enable(counter); | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 987 |  | 
|  | 988 | return 0; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 989 | } | 
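A hedged usage sketch of how this refresh path is typically driven from userspace; the counter fd and any names outside this file are assumptions about the era's ABI, not something defined here:

	/* arm the counter for `n` more overflow events, then count */
	if (ioctl(fd, PERF_COUNTER_IOC_REFRESH, n) < 0)
		perror("PERF_COUNTER_IOC_REFRESH");
	/*
	 * once `n` overflows have been taken the kernel is expected to
	 * disable the counter again and wake up poll()/SIGIO waiters,
	 * so the caller can harvest the data and re-arm.
	 */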
|  | 990 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 991 | void __perf_counter_sched_out(struct perf_counter_context *ctx, | 
|  | 992 | struct perf_cpu_context *cpuctx) | 
|  | 993 | { | 
|  | 994 | struct perf_counter *counter; | 
|  | 995 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 996 | spin_lock(&ctx->lock); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 997 | ctx->is_active = 0; | 
|  | 998 | if (likely(!ctx->nr_counters)) | 
|  | 999 | goto out; | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 1000 | update_context_time(ctx); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1001 |  | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 1002 | perf_disable(); | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1003 | if (ctx->nr_active) { | 
| Peter Zijlstra | afedadf | 2009-05-20 12:21:22 +0200 | [diff] [blame] | 1004 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | 
|  | 1005 | if (counter != counter->group_leader) | 
|  | 1006 | counter_sched_out(counter, cpuctx, ctx); | 
|  | 1007 | else | 
|  | 1008 | group_sched_out(counter, cpuctx, ctx); | 
|  | 1009 | } | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1010 | } | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 1011 | perf_enable(); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1012 | out: | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1013 | spin_unlock(&ctx->lock); | 
|  | 1014 | } | 
|  | 1015 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1016 | /* | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1017 | * Test whether two contexts are equivalent, i.e. whether they | 
|  | 1018 | * have both been cloned from the same version of the same context | 
|  | 1019 | * and they both have the same number of enabled counters. | 
|  | 1020 | * If the number of enabled counters is the same, then the set | 
|  | 1021 | * of enabled counters should be the same, because these are both | 
|  | 1022 | * inherited contexts, therefore we can't access individual counters | 
|  | 1023 | * in them directly with an fd; we can only enable/disable all | 
|  | 1024 | * counters via prctl, or enable/disable all counters in a family | 
|  | 1025 | * via ioctl, which will have the same effect on both contexts. | 
|  | 1026 | */ | 
|  | 1027 | static int context_equiv(struct perf_counter_context *ctx1, | 
|  | 1028 | struct perf_counter_context *ctx2) | 
|  | 1029 | { | 
|  | 1030 | return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 1031 | && ctx1->parent_gen == ctx2->parent_gen | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 1032 | && !ctx1->pin_count && !ctx2->pin_count; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1033 | } | 
|  | 1034 |  | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1035 | static void __perf_counter_read(void *counter); | 
|  | 1036 |  | 
|  | 1037 | static void __perf_counter_sync_stat(struct perf_counter *counter, | 
|  | 1038 | struct perf_counter *next_counter) | 
|  | 1039 | { | 
|  | 1040 | u64 value; | 
|  | 1041 |  | 
|  | 1042 | if (!counter->attr.inherit_stat) | 
|  | 1043 | return; | 
|  | 1044 |  | 
|  | 1045 | /* | 
|  | 1046 | * Update the counter value. We cannot use perf_counter_read() | 
|  | 1047 | * because we're in the middle of a context switch with IRQs | 
|  | 1048 | * disabled, which upsets smp_call_function_single(). However, | 
|  | 1049 | * we know the counter must be on the current CPU, so we can | 
|  | 1050 | * read it directly. | 
|  | 1051 | */ | 
|  | 1052 | switch (counter->state) { | 
|  | 1053 | case PERF_COUNTER_STATE_ACTIVE: | 
|  | 1054 | __perf_counter_read(counter); | 
|  | 1055 | break; | 
|  | 1056 |  | 
|  | 1057 | case PERF_COUNTER_STATE_INACTIVE: | 
|  | 1058 | update_counter_times(counter); | 
|  | 1059 | break; | 
|  | 1060 |  | 
|  | 1061 | default: | 
|  | 1062 | break; | 
|  | 1063 | } | 
|  | 1064 |  | 
|  | 1065 | /* | 
|  | 1066 | * In order to keep per-task stats reliable we need to flip the counter | 
|  | 1067 | * values when we flip the contexts. | 
|  | 1068 | */ | 
|  | 1069 | value = atomic64_read(&next_counter->count); | 
|  | 1070 | value = atomic64_xchg(&counter->count, value); | 
|  | 1071 | atomic64_set(&next_counter->count, value); | 
|  | 1072 |  | 
| Peter Zijlstra | 19d2e75 | 2009-06-26 13:10:23 +0200 | [diff] [blame] | 1073 | swap(counter->total_time_enabled, next_counter->total_time_enabled); | 
|  | 1074 | swap(counter->total_time_running, next_counter->total_time_running); | 
|  | 1075 |  | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1076 | /* | 
| Peter Zijlstra | 19d2e75 | 2009-06-26 13:10:23 +0200 | [diff] [blame] | 1077 | * Since we swizzled the values, update the user visible data too. | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1078 | */ | 
| Peter Zijlstra | 19d2e75 | 2009-06-26 13:10:23 +0200 | [diff] [blame] | 1079 | perf_counter_update_userpage(counter); | 
|  | 1080 | perf_counter_update_userpage(next_counter); | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1081 | } | 
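A small worked illustration of the value swizzle above (numbers invented for clarity):

	/*
	 * if `counter` (about to travel with the other task) has counted
	 * 1000 events and `next_counter` has counted 400, the xchg pair
	 * leaves 400 in `counter` and 1000 in `next_counter`; since the
	 * two contexts are being exchanged between the tasks as well, the
	 * value that was accumulating for this task keeps accumulating in
	 * the counter that will now follow this task.
	 */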
|  | 1082 |  | 
|  | 1083 | #define list_next_entry(pos, member) \ | 
|  | 1084 | list_entry(pos->member.next, typeof(*pos), member) | 
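For readers unfamiliar with the list helpers, the macro above expands roughly as follows (purely illustrative):

	/*
	 * list_next_entry(counter, event_entry)
	 *	=> list_entry(counter->event_entry.next,
	 *		      struct perf_counter, event_entry)
	 *
	 * i.e. the perf_counter whose event_entry node follows counter's
	 * node in the same list.
	 */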
|  | 1085 |  | 
|  | 1086 | static void perf_counter_sync_stat(struct perf_counter_context *ctx, | 
|  | 1087 | struct perf_counter_context *next_ctx) | 
|  | 1088 | { | 
|  | 1089 | struct perf_counter *counter, *next_counter; | 
|  | 1090 |  | 
|  | 1091 | if (!ctx->nr_stat) | 
|  | 1092 | return; | 
|  | 1093 |  | 
|  | 1094 | counter = list_first_entry(&ctx->event_list, | 
|  | 1095 | struct perf_counter, event_entry); | 
|  | 1096 |  | 
|  | 1097 | next_counter = list_first_entry(&next_ctx->event_list, | 
|  | 1098 | struct perf_counter, event_entry); | 
|  | 1099 |  | 
|  | 1100 | while (&counter->event_entry != &ctx->event_list && | 
|  | 1101 | &next_counter->event_entry != &next_ctx->event_list) { | 
|  | 1102 |  | 
|  | 1103 | __perf_counter_sync_stat(counter, next_counter); | 
|  | 1104 |  | 
|  | 1105 | counter = list_next_entry(counter, event_entry); | 
|  | 1106 | next_counter = list_next_entry(next_counter, event_entry); | 
|  | 1107 | } | 
|  | 1108 | } | 
|  | 1109 |  | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1110 | /* | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1111 | * Called from scheduler to remove the counters of the current task, | 
|  | 1112 | * with interrupts disabled. | 
|  | 1113 | * | 
|  | 1114 | * We stop each counter and update the counter value in counter->count. | 
|  | 1115 | * | 
| Ingo Molnar | 7671581 | 2008-12-17 14:20:28 +0100 | [diff] [blame] | 1116 | * This does not protect us against NMI, but disable() | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1117 | * sets the disabled bit in the control field of counter _before_ | 
|  | 1118 | * accessing the counter control register. If an NMI hits, it will | 
|  | 1119 | * not restart the counter. | 
|  | 1120 | */ | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1121 | void perf_counter_task_sched_out(struct task_struct *task, | 
|  | 1122 | struct task_struct *next, int cpu) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1123 | { | 
|  | 1124 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1125 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1126 | struct perf_counter_context *next_ctx; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1127 | struct perf_counter_context *parent; | 
| Peter Zijlstra | 4a0deca | 2009-03-19 20:26:12 +0100 | [diff] [blame] | 1128 | struct pt_regs *regs; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1129 | int do_switch = 1; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1130 |  | 
| Peter Zijlstra | 10989fb | 2009-05-25 14:45:28 +0200 | [diff] [blame] | 1131 | regs = task_pt_regs(task); | 
| Peter Zijlstra | f4dbfa8 | 2009-06-11 14:06:28 +0200 | [diff] [blame] | 1132 | perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); | 
| Peter Zijlstra | 10989fb | 2009-05-25 14:45:28 +0200 | [diff] [blame] | 1133 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1134 | if (likely(!ctx || !cpuctx->task_ctx)) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1135 | return; | 
|  | 1136 |  | 
| Peter Zijlstra | bce379b | 2009-04-06 11:45:13 +0200 | [diff] [blame] | 1137 | update_context_time(ctx); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1138 |  | 
|  | 1139 | rcu_read_lock(); | 
|  | 1140 | parent = rcu_dereference(ctx->parent_ctx); | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1141 | next_ctx = next->perf_counter_ctxp; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1142 | if (parent && next_ctx && | 
|  | 1143 | rcu_dereference(next_ctx->parent_ctx) == parent) { | 
|  | 1144 | /* | 
|  | 1145 | * Looks like the two contexts are clones, so we might be | 
|  | 1146 | * able to optimize the context switch.  We lock both | 
|  | 1147 | * contexts and check that they are clones under the | 
|  | 1148 | * lock (including re-checking that neither has been | 
|  | 1149 | * uncloned in the meantime).  It doesn't matter which | 
|  | 1150 | * order we take the locks because no other cpu could | 
|  | 1151 | * be trying to lock both of these tasks. | 
|  | 1152 | */ | 
|  | 1153 | spin_lock(&ctx->lock); | 
|  | 1154 | spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); | 
|  | 1155 | if (context_equiv(ctx, next_ctx)) { | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 1156 | /* | 
|  | 1157 | * XXX do we need a memory barrier of sorts | 
|  | 1158 | * w.r.t. rcu_dereference() of perf_counter_ctxp | 
|  | 1159 | */ | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1160 | task->perf_counter_ctxp = next_ctx; | 
|  | 1161 | next->perf_counter_ctxp = ctx; | 
|  | 1162 | ctx->task = next; | 
|  | 1163 | next_ctx->task = task; | 
|  | 1164 | do_switch = 0; | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1165 |  | 
|  | 1166 | perf_counter_sync_stat(ctx, next_ctx); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1167 | } | 
|  | 1168 | spin_unlock(&next_ctx->lock); | 
|  | 1169 | spin_unlock(&ctx->lock); | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1170 | } | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1171 | rcu_read_unlock(); | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1172 |  | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1173 | if (do_switch) { | 
|  | 1174 | __perf_counter_sched_out(ctx, cpuctx); | 
|  | 1175 | cpuctx->task_ctx = NULL; | 
|  | 1176 | } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1177 | } | 
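An illustrative scenario for the clone-swap optimization above:

	/*
	 * a parent with inherited counters forks a child; both tasks then
	 * carry cloned, equivalent contexts.  On a parent -> child switch
	 * we avoid scheduling all counters out and back in again and just
	 * exchange the two context pointers (plus perf_counter_sync_stat()
	 * for inherit_stat counters), which is much cheaper on fork- and
	 * exec-heavy workloads.
	 */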
|  | 1178 |  | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 1179 | /* | 
|  | 1180 | * Called with IRQs disabled | 
|  | 1181 | */ | 
| Paul Mackerras | a08b159 | 2009-05-11 15:46:10 +1000 | [diff] [blame] | 1182 | static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) | 
|  | 1183 | { | 
|  | 1184 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 
|  | 1185 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1186 | if (!cpuctx->task_ctx) | 
|  | 1187 | return; | 
| Ingo Molnar | 012b84d | 2009-05-17 11:08:41 +0200 | [diff] [blame] | 1188 |  | 
|  | 1189 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) | 
|  | 1190 | return; | 
|  | 1191 |  | 
| Paul Mackerras | a08b159 | 2009-05-11 15:46:10 +1000 | [diff] [blame] | 1192 | __perf_counter_sched_out(ctx, cpuctx); | 
|  | 1193 | cpuctx->task_ctx = NULL; | 
|  | 1194 | } | 
|  | 1195 |  | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 1196 | /* | 
|  | 1197 | * Called with IRQs disabled | 
|  | 1198 | */ | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1199 | static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1200 | { | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1201 | __perf_counter_sched_out(&cpuctx->ctx, cpuctx); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1202 | } | 
|  | 1203 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1204 | static void | 
|  | 1205 | __perf_counter_sched_in(struct perf_counter_context *ctx, | 
|  | 1206 | struct perf_cpu_context *cpuctx, int cpu) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1207 | { | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1208 | struct perf_counter *counter; | 
| Paul Mackerras | dd0e6ba | 2009-01-12 15:11:00 +1100 | [diff] [blame] | 1209 | int can_add_hw = 1; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1210 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1211 | spin_lock(&ctx->lock); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1212 | ctx->is_active = 1; | 
|  | 1213 | if (likely(!ctx->nr_counters)) | 
|  | 1214 | goto out; | 
|  | 1215 |  | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 1216 | ctx->timestamp = perf_clock(); | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1217 |  | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 1218 | perf_disable(); | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1219 |  | 
|  | 1220 | /* | 
|  | 1221 | * First go through the list and put on any pinned groups | 
|  | 1222 | * in order to give them the best chance of going on. | 
|  | 1223 | */ | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1224 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1225 | if (counter->state <= PERF_COUNTER_STATE_OFF || | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1226 | !counter->attr.pinned) | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1227 | continue; | 
|  | 1228 | if (counter->cpu != -1 && counter->cpu != cpu) | 
|  | 1229 | continue; | 
|  | 1230 |  | 
| Peter Zijlstra | afedadf | 2009-05-20 12:21:22 +0200 | [diff] [blame] | 1231 | if (counter != counter->group_leader) | 
|  | 1232 | counter_sched_in(counter, cpuctx, ctx, cpu); | 
|  | 1233 | else { | 
|  | 1234 | if (group_can_go_on(counter, cpuctx, 1)) | 
|  | 1235 | group_sched_in(counter, cpuctx, ctx, cpu); | 
|  | 1236 | } | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1237 |  | 
|  | 1238 | /* | 
|  | 1239 | * If this pinned group hasn't been scheduled, | 
|  | 1240 | * put it in error state. | 
|  | 1241 | */ | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1242 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | 
|  | 1243 | update_group_times(counter); | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1244 | counter->state = PERF_COUNTER_STATE_ERROR; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1245 | } | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1246 | } | 
|  | 1247 |  | 
|  | 1248 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | 
|  | 1249 | /* | 
|  | 1250 | * Ignore counters in OFF or ERROR state, and | 
|  | 1251 | * ignore pinned counters since we did them already. | 
|  | 1252 | */ | 
|  | 1253 | if (counter->state <= PERF_COUNTER_STATE_OFF || | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1254 | counter->attr.pinned) | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1255 | continue; | 
|  | 1256 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1257 | /* | 
|  | 1258 | * Listen to the 'cpu' scheduling filter constraint | 
|  | 1259 | * of counters: | 
|  | 1260 | */ | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1261 | if (counter->cpu != -1 && counter->cpu != cpu) | 
|  | 1262 | continue; | 
|  | 1263 |  | 
| Peter Zijlstra | afedadf | 2009-05-20 12:21:22 +0200 | [diff] [blame] | 1264 | if (counter != counter->group_leader) { | 
|  | 1265 | if (counter_sched_in(counter, cpuctx, ctx, cpu)) | 
| Paul Mackerras | dd0e6ba | 2009-01-12 15:11:00 +1100 | [diff] [blame] | 1266 | can_add_hw = 0; | 
| Peter Zijlstra | afedadf | 2009-05-20 12:21:22 +0200 | [diff] [blame] | 1267 | } else { | 
|  | 1268 | if (group_can_go_on(counter, cpuctx, can_add_hw)) { | 
|  | 1269 | if (group_sched_in(counter, cpuctx, ctx, cpu)) | 
|  | 1270 | can_add_hw = 0; | 
|  | 1271 | } | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1272 | } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1273 | } | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 1274 | perf_enable(); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1275 | out: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1276 | spin_unlock(&ctx->lock); | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1277 | } | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1278 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1279 | /* | 
|  | 1280 | * Called from scheduler to add the counters of the current task | 
|  | 1281 | * with interrupts disabled. | 
|  | 1282 | * | 
|  | 1283 | * We restore the counter value and then enable it. | 
|  | 1284 | * | 
|  | 1285 | * This does not protect us against NMI, but enable() | 
|  | 1286 | * sets the enabled bit in the control field of counter _before_ | 
|  | 1287 | * accessing the counter control register. If an NMI hits, it will | 
|  | 1288 | * keep the counter running. | 
|  | 1289 | */ | 
|  | 1290 | void perf_counter_task_sched_in(struct task_struct *task, int cpu) | 
|  | 1291 | { | 
|  | 1292 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1293 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1294 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1295 | if (likely(!ctx)) | 
|  | 1296 | return; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1297 | if (cpuctx->task_ctx == ctx) | 
|  | 1298 | return; | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1299 | __perf_counter_sched_in(ctx, cpuctx, cpu); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1300 | cpuctx->task_ctx = ctx; | 
|  | 1301 | } | 
|  | 1302 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1303 | static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) | 
|  | 1304 | { | 
|  | 1305 | struct perf_counter_context *ctx = &cpuctx->ctx; | 
|  | 1306 |  | 
|  | 1307 | __perf_counter_sched_in(ctx, cpuctx, cpu); | 
|  | 1308 | } | 
|  | 1309 |  | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 1310 | #define MAX_INTERRUPTS (~0ULL) | 
|  | 1311 |  | 
|  | 1312 | static void perf_log_throttle(struct perf_counter *counter, int enable); | 
| Peter Zijlstra | 26b119b | 2009-05-20 12:21:20 +0200 | [diff] [blame] | 1313 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1314 | static void perf_adjust_period(struct perf_counter *counter, u64 events) | 
|  | 1315 | { | 
|  | 1316 | struct hw_perf_counter *hwc = &counter->hw; | 
|  | 1317 | u64 period, sample_period; | 
|  | 1318 | s64 delta; | 
|  | 1319 |  | 
|  | 1320 | events *= hwc->sample_period; | 
|  | 1321 | period = div64_u64(events, counter->attr.sample_freq); | 
|  | 1322 |  | 
|  | 1323 | delta = (s64)(period - hwc->sample_period); | 
|  | 1324 | delta = (delta + 7) / 8; /* low pass filter */ | 
|  | 1325 |  | 
|  | 1326 | sample_period = hwc->sample_period + delta; | 
|  | 1327 |  | 
|  | 1328 | if (!sample_period) | 
|  | 1329 | sample_period = 1; | 
|  | 1330 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1331 | hwc->sample_period = sample_period; | 
|  | 1332 | } | 
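A worked example of the adjustment arithmetic above, with invented numbers:

	/*
	 * attr.sample_freq = 1000, current hwc->sample_period = 10000, and
	 * the caller estimates ~2000 interrupts over the last second, so
	 * `events` arrives as 2000 and becomes 2000 * 10000 = 20,000,000
	 * counted events/sec.  The ideal period is 20,000,000 / 1000 =
	 * 20,000, giving delta = 10,000; the low-pass filter turns that
	 * into (10000 + 7) / 8 = 1250, so the period moves to 11,250 -
	 * roughly 1/8th of the way toward the ideal value per adjustment.
	 */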
|  | 1333 |  | 
|  | 1334 | static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 1335 | { | 
|  | 1336 | struct perf_counter *counter; | 
| Peter Zijlstra | 6a24ed6c | 2009-06-05 18:01:29 +0200 | [diff] [blame] | 1337 | struct hw_perf_counter *hwc; | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1338 | u64 interrupts, freq; | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 1339 |  | 
|  | 1340 | spin_lock(&ctx->lock); | 
|  | 1341 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | 
|  | 1342 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | 
|  | 1343 | continue; | 
|  | 1344 |  | 
| Peter Zijlstra | 6a24ed6c | 2009-06-05 18:01:29 +0200 | [diff] [blame] | 1345 | hwc = &counter->hw; | 
|  | 1346 |  | 
|  | 1347 | interrupts = hwc->interrupts; | 
|  | 1348 | hwc->interrupts = 0; | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 1349 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1350 | /* | 
|  | 1351 | * unthrottle counters on the tick | 
|  | 1352 | */ | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 1353 | if (interrupts == MAX_INTERRUPTS) { | 
|  | 1354 | perf_log_throttle(counter, 1); | 
|  | 1355 | counter->pmu->unthrottle(counter); | 
| Peter Zijlstra | df58ab2 | 2009-06-11 11:25:05 +0200 | [diff] [blame] | 1356 | interrupts = 2*sysctl_perf_counter_sample_rate/HZ; | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 1357 | } | 
|  | 1358 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1359 | if (!counter->attr.freq || !counter->attr.sample_freq) | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 1360 | continue; | 
|  | 1361 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1362 | /* | 
|  | 1363 | * if the specified freq < HZ then we need to skip ticks | 
|  | 1364 | */ | 
| Peter Zijlstra | 6a24ed6c | 2009-06-05 18:01:29 +0200 | [diff] [blame] | 1365 | if (counter->attr.sample_freq < HZ) { | 
|  | 1366 | freq = counter->attr.sample_freq; | 
|  | 1367 |  | 
|  | 1368 | hwc->freq_count += freq; | 
|  | 1369 | hwc->freq_interrupts += interrupts; | 
|  | 1370 |  | 
|  | 1371 | if (hwc->freq_count < HZ) | 
|  | 1372 | continue; | 
|  | 1373 |  | 
|  | 1374 | interrupts = hwc->freq_interrupts; | 
|  | 1375 | hwc->freq_interrupts = 0; | 
|  | 1376 | hwc->freq_count -= HZ; | 
|  | 1377 | } else | 
|  | 1378 | freq = HZ; | 
|  | 1379 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1380 | perf_adjust_period(counter, freq * interrupts); | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 1381 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1382 | /* | 
|  | 1383 | * In order to avoid being stalled by an (accidental) huge | 
|  | 1384 | * sample period, force reset the sample period if we didn't | 
|  | 1385 | * get any events in this freq period. | 
|  | 1386 | */ | 
|  | 1387 | if (!interrupts) { | 
|  | 1388 | perf_disable(); | 
|  | 1389 | counter->pmu->disable(counter); | 
| Paul Mackerras | 87847b8 | 2009-06-13 17:06:50 +1000 | [diff] [blame] | 1390 | atomic64_set(&hwc->period_left, 0); | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1391 | counter->pmu->enable(counter); | 
|  | 1392 | perf_enable(); | 
|  | 1393 | } | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 1394 | } | 
|  | 1395 | spin_unlock(&ctx->lock); | 
|  | 1396 | } | 
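To make the sample_freq < HZ branch above concrete (illustrative numbers):

	/*
	 * with HZ = 1000 and attr.sample_freq = 100, freq_count grows by
	 * 100 on every tick and only reaches HZ after 10 ticks, so the
	 * period is re-evaluated once every 10 ticks using the interrupts
	 * accumulated in freq_interrupts over that window, instead of once
	 * per tick on far too little data.
	 */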
|  | 1397 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1398 | /* | 
|  | 1399 | * Round-robin a context's counters: | 
|  | 1400 | */ | 
|  | 1401 | static void rotate_ctx(struct perf_counter_context *ctx) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1402 | { | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1403 | struct perf_counter *counter; | 
|  | 1404 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1405 | if (!ctx->nr_counters) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1406 | return; | 
|  | 1407 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1408 | spin_lock(&ctx->lock); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1409 | /* | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1410 | * Rotate the first entry last (works just fine for group counters too): | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1411 | */ | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 1412 | perf_disable(); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1413 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | 
| Peter Zijlstra | 7556423 | 2009-03-13 12:21:29 +0100 | [diff] [blame] | 1414 | list_move_tail(&counter->list_entry, &ctx->counter_list); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1415 | break; | 
|  | 1416 | } | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 1417 | perf_enable(); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1418 |  | 
|  | 1419 | spin_unlock(&ctx->lock); | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1420 | } | 
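A short illustration of what the rotation buys us:

	/*
	 * if the counter list is [A, B, C] but only two counters fit on
	 * the PMU, moving A to the tail yields [B, C, A]; on the next
	 * sched-in it is A's turn to sit out, so every counter gets PMU
	 * time over successive ticks when counters are over-committed.
	 */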
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1421 |  | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1422 | void perf_counter_task_tick(struct task_struct *curr, int cpu) | 
|  | 1423 | { | 
| Peter Zijlstra | 7fc23a5 | 2009-05-08 18:52:21 +0200 | [diff] [blame] | 1424 | struct perf_cpu_context *cpuctx; | 
|  | 1425 | struct perf_counter_context *ctx; | 
|  | 1426 |  | 
|  | 1427 | if (!atomic_read(&nr_counters)) | 
|  | 1428 | return; | 
|  | 1429 |  | 
|  | 1430 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1431 | ctx = curr->perf_counter_ctxp; | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1432 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1433 | perf_ctx_adjust_freq(&cpuctx->ctx); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1434 | if (ctx) | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 1435 | perf_ctx_adjust_freq(ctx); | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 1436 |  | 
| Ingo Molnar | b82914c | 2009-05-04 18:54:32 +0200 | [diff] [blame] | 1437 | perf_counter_cpu_sched_out(cpuctx); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1438 | if (ctx) | 
|  | 1439 | __perf_counter_task_sched_out(ctx); | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1440 |  | 
| Ingo Molnar | b82914c | 2009-05-04 18:54:32 +0200 | [diff] [blame] | 1441 | rotate_ctx(&cpuctx->ctx); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1442 | if (ctx) | 
|  | 1443 | rotate_ctx(ctx); | 
| Ingo Molnar | 235c7fc | 2008-12-21 14:43:25 +0100 | [diff] [blame] | 1444 |  | 
| Ingo Molnar | b82914c | 2009-05-04 18:54:32 +0200 | [diff] [blame] | 1445 | perf_counter_cpu_sched_in(cpuctx, cpu); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1446 | if (ctx) | 
|  | 1447 | perf_counter_task_sched_in(curr, cpu); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1448 | } | 
|  | 1449 |  | 
|  | 1450 | /* | 
| Paul Mackerras | 57e7986 | 2009-06-30 16:07:19 +1000 | [diff] [blame] | 1451 | * Enable all of a task's counters that have been marked enable-on-exec. | 
|  | 1452 | * This expects task == current. | 
|  | 1453 | */ | 
|  | 1454 | static void perf_counter_enable_on_exec(struct task_struct *task) | 
|  | 1455 | { | 
|  | 1456 | struct perf_counter_context *ctx; | 
|  | 1457 | struct perf_counter *counter; | 
|  | 1458 | unsigned long flags; | 
|  | 1459 | int enabled = 0; | 
|  | 1460 |  | 
|  | 1461 | local_irq_save(flags); | 
|  | 1462 | ctx = task->perf_counter_ctxp; | 
|  | 1463 | if (!ctx || !ctx->nr_counters) | 
|  | 1464 | goto out; | 
|  | 1465 |  | 
|  | 1466 | __perf_counter_task_sched_out(ctx); | 
|  | 1467 |  | 
|  | 1468 | spin_lock(&ctx->lock); | 
|  | 1469 |  | 
|  | 1470 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | 
|  | 1471 | if (!counter->attr.enable_on_exec) | 
|  | 1472 | continue; | 
|  | 1473 | counter->attr.enable_on_exec = 0; | 
|  | 1474 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | 
|  | 1475 | continue; | 
|  | 1476 | counter->state = PERF_COUNTER_STATE_INACTIVE; | 
|  | 1477 | counter->tstamp_enabled = | 
|  | 1478 | ctx->time - counter->total_time_enabled; | 
|  | 1479 | enabled = 1; | 
|  | 1480 | } | 
|  | 1481 |  | 
|  | 1482 | /* | 
|  | 1483 | * Unclone this context if we enabled any counter. | 
|  | 1484 | */ | 
| Peter Zijlstra | 71a851b | 2009-07-10 09:06:56 +0200 | [diff] [blame] | 1485 | if (enabled) | 
|  | 1486 | unclone_ctx(ctx); | 
| Paul Mackerras | 57e7986 | 2009-06-30 16:07:19 +1000 | [diff] [blame] | 1487 |  | 
|  | 1488 | spin_unlock(&ctx->lock); | 
|  | 1489 |  | 
|  | 1490 | perf_counter_task_sched_in(task, smp_processor_id()); | 
|  | 1491 | out: | 
|  | 1492 | local_irq_restore(flags); | 
|  | 1493 | } | 
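A hedged userspace sketch of what enable_on_exec is for; the open call, the pid plumbing and the event names are assumptions about the era's syscall ABI rather than something this file defines:

	struct perf_counter_attr attr = { };

	attr.type           = PERF_TYPE_HARDWARE;
	attr.config         = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled       = 1;	/* don't count our own setup code */
	attr.enable_on_exec = 1;	/* start counting at the exec()   */

	/* attach to a child that will exec() the workload of interest */
	fd = sys_perf_counter_open(&attr, child_pid, -1, -1, 0);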
|  | 1494 |  | 
|  | 1495 | /* | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1496 | * Cross CPU call to read the hardware counter | 
|  | 1497 | */ | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1498 | static void __perf_counter_read(void *info) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1499 | { | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1500 | struct perf_counter *counter = info; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1501 | struct perf_counter_context *ctx = counter->ctx; | 
| Ingo Molnar | aa9c4c0 | 2008-12-17 14:10:57 +0100 | [diff] [blame] | 1502 | unsigned long flags; | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1503 |  | 
| Peter Zijlstra | 849691a | 2009-04-06 11:45:12 +0200 | [diff] [blame] | 1504 | local_irq_save(flags); | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1505 | if (ctx->is_active) | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 1506 | update_context_time(ctx); | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 1507 | counter->pmu->read(counter); | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1508 | update_counter_times(counter); | 
| Peter Zijlstra | 849691a | 2009-04-06 11:45:12 +0200 | [diff] [blame] | 1509 | local_irq_restore(flags); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1510 | } | 
|  | 1511 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1512 | static u64 perf_counter_read(struct perf_counter *counter) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1513 | { | 
|  | 1514 | /* | 
|  | 1515 | * If counter is enabled and currently active on a CPU, update the | 
|  | 1516 | * value in the counter structure: | 
|  | 1517 | */ | 
| Ingo Molnar | 6a93070 | 2008-12-11 15:17:03 +0100 | [diff] [blame] | 1518 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1519 | smp_call_function_single(counter->oncpu, | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1520 | __perf_counter_read, counter, 1); | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1521 | } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | 
|  | 1522 | update_counter_times(counter); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1523 | } | 
|  | 1524 |  | 
| Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 1525 | return atomic64_read(&counter->count); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1526 | } | 
|  | 1527 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1528 | /* | 
|  | 1529 | * Initialize the perf_counter context in a task_struct: | 
|  | 1530 | */ | 
|  | 1531 | static void | 
|  | 1532 | __perf_counter_init_context(struct perf_counter_context *ctx, | 
|  | 1533 | struct task_struct *task) | 
|  | 1534 | { | 
|  | 1535 | memset(ctx, 0, sizeof(*ctx)); | 
|  | 1536 | spin_lock_init(&ctx->lock); | 
|  | 1537 | mutex_init(&ctx->mutex); | 
|  | 1538 | INIT_LIST_HEAD(&ctx->counter_list); | 
|  | 1539 | INIT_LIST_HEAD(&ctx->event_list); | 
|  | 1540 | atomic_set(&ctx->refcount, 1); | 
|  | 1541 | ctx->task = task; | 
|  | 1542 | } | 
|  | 1543 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1544 | static struct perf_counter_context *find_get_context(pid_t pid, int cpu) | 
|  | 1545 | { | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 1546 | struct perf_counter_context *ctx; | 
|  | 1547 | struct perf_cpu_context *cpuctx; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1548 | struct task_struct *task; | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 1549 | unsigned long flags; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1550 | int err; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1551 |  | 
|  | 1552 | /* | 
|  | 1553 | * If cpu is not a wildcard then this is a percpu counter: | 
|  | 1554 | */ | 
|  | 1555 | if (cpu != -1) { | 
|  | 1556 | /* Must be root to operate on a CPU counter: */ | 
| Peter Zijlstra | 0764771 | 2009-06-11 11:18:36 +0200 | [diff] [blame] | 1557 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1558 | return ERR_PTR(-EACCES); | 
|  | 1559 |  | 
|  | 1560 | if (cpu < 0 || cpu >= num_possible_cpus()) | 
|  | 1561 | return ERR_PTR(-EINVAL); | 
|  | 1562 |  | 
|  | 1563 | /* | 
|  | 1564 | * We could be clever and allow attaching a counter to an | 
|  | 1565 | * offline CPU and activate it when the CPU comes up, but | 
|  | 1566 | * that's for later. | 
|  | 1567 | */ | 
|  | 1568 | if (!cpu_isset(cpu, cpu_online_map)) | 
|  | 1569 | return ERR_PTR(-ENODEV); | 
|  | 1570 |  | 
|  | 1571 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 
|  | 1572 | ctx = &cpuctx->ctx; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1573 | get_ctx(ctx); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1574 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1575 | return ctx; | 
|  | 1576 | } | 
|  | 1577 |  | 
|  | 1578 | rcu_read_lock(); | 
|  | 1579 | if (!pid) | 
|  | 1580 | task = current; | 
|  | 1581 | else | 
|  | 1582 | task = find_task_by_vpid(pid); | 
|  | 1583 | if (task) | 
|  | 1584 | get_task_struct(task); | 
|  | 1585 | rcu_read_unlock(); | 
|  | 1586 |  | 
|  | 1587 | if (!task) | 
|  | 1588 | return ERR_PTR(-ESRCH); | 
|  | 1589 |  | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1590 | /* | 
|  | 1591 | * Can't attach counters to a dying task. | 
|  | 1592 | */ | 
|  | 1593 | err = -ESRCH; | 
|  | 1594 | if (task->flags & PF_EXITING) | 
|  | 1595 | goto errout; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1596 |  | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1597 | /* Reuse ptrace permission checks for now. */ | 
|  | 1598 | err = -EACCES; | 
|  | 1599 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | 
|  | 1600 | goto errout; | 
|  | 1601 |  | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1602 | retry: | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 1603 | ctx = perf_lock_task_context(task, &flags); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1604 | if (ctx) { | 
| Peter Zijlstra | 71a851b | 2009-07-10 09:06:56 +0200 | [diff] [blame] | 1605 | unclone_ctx(ctx); | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 1606 | spin_unlock_irqrestore(&ctx->lock, flags); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1607 | } | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1608 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1609 | if (!ctx) { | 
|  | 1610 | ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1611 | err = -ENOMEM; | 
|  | 1612 | if (!ctx) | 
|  | 1613 | goto errout; | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1614 | __perf_counter_init_context(ctx, task); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1615 | get_ctx(ctx); | 
|  | 1616 | if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1617 | /* | 
|  | 1618 | * We raced with some other task; use | 
|  | 1619 | * the context they set. | 
|  | 1620 | */ | 
|  | 1621 | kfree(ctx); | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 1622 | goto retry; | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1623 | } | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1624 | get_task_struct(task); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1625 | } | 
|  | 1626 |  | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1627 | put_task_struct(task); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1628 | return ctx; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1629 |  | 
|  | 1630 | errout: | 
|  | 1631 | put_task_struct(task); | 
|  | 1632 | return ERR_PTR(err); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1633 | } | 
|  | 1634 |  | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 1635 | static void free_counter_rcu(struct rcu_head *head) | 
|  | 1636 | { | 
|  | 1637 | struct perf_counter *counter; | 
|  | 1638 |  | 
|  | 1639 | counter = container_of(head, struct perf_counter, rcu_head); | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 1640 | if (counter->ns) | 
|  | 1641 | put_pid_ns(counter->ns); | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 1642 | kfree(counter); | 
|  | 1643 | } | 
|  | 1644 |  | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 1645 | static void perf_pending_sync(struct perf_counter *counter); | 
|  | 1646 |  | 
| Peter Zijlstra | f160095 | 2009-03-19 20:26:16 +0100 | [diff] [blame] | 1647 | static void free_counter(struct perf_counter *counter) | 
|  | 1648 | { | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 1649 | perf_pending_sync(counter); | 
|  | 1650 |  | 
| Peter Zijlstra | f344011 | 2009-06-22 13:58:35 +0200 | [diff] [blame] | 1651 | if (!counter->parent) { | 
|  | 1652 | atomic_dec(&nr_counters); | 
|  | 1653 | if (counter->attr.mmap) | 
|  | 1654 | atomic_dec(&nr_mmap_counters); | 
|  | 1655 | if (counter->attr.comm) | 
|  | 1656 | atomic_dec(&nr_comm_counters); | 
|  | 1657 | } | 
| Peter Zijlstra | 9ee318a | 2009-04-09 10:53:44 +0200 | [diff] [blame] | 1658 |  | 
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 1659 | if (counter->destroy) | 
|  | 1660 | counter->destroy(counter); | 
|  | 1661 |  | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1662 | put_ctx(counter->ctx); | 
| Peter Zijlstra | f160095 | 2009-03-19 20:26:16 +0100 | [diff] [blame] | 1663 | call_rcu(&counter->rcu_head, free_counter_rcu); | 
|  | 1664 | } | 
|  | 1665 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1666 | /* | 
|  | 1667 | * Called when the last reference to the file is gone. | 
|  | 1668 | */ | 
|  | 1669 | static int perf_release(struct inode *inode, struct file *file) | 
|  | 1670 | { | 
|  | 1671 | struct perf_counter *counter = file->private_data; | 
|  | 1672 | struct perf_counter_context *ctx = counter->ctx; | 
|  | 1673 |  | 
|  | 1674 | file->private_data = NULL; | 
|  | 1675 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 1676 | WARN_ON_ONCE(ctx->parent_ctx); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1677 | mutex_lock(&ctx->mutex); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1678 | perf_counter_remove_from_context(counter); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1679 | mutex_unlock(&ctx->mutex); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1680 |  | 
| Peter Zijlstra | 082ff5a | 2009-05-23 18:29:00 +0200 | [diff] [blame] | 1681 | mutex_lock(&counter->owner->perf_counter_mutex); | 
|  | 1682 | list_del_init(&counter->owner_entry); | 
|  | 1683 | mutex_unlock(&counter->owner->perf_counter_mutex); | 
|  | 1684 | put_task_struct(counter->owner); | 
|  | 1685 |  | 
| Peter Zijlstra | f160095 | 2009-03-19 20:26:16 +0100 | [diff] [blame] | 1686 | free_counter(counter); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1687 |  | 
|  | 1688 | return 0; | 
|  | 1689 | } | 
|  | 1690 |  | 
|  | 1691 | /* | 
|  | 1692 | * Read the performance counter - simple non-blocking version for now | 
|  | 1693 | */ | 
|  | 1694 | static ssize_t | 
|  | 1695 | perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) | 
|  | 1696 | { | 
| Marti Raudsepp | d5e8da6 | 2009-06-13 02:35:01 +0300 | [diff] [blame] | 1697 | u64 values[4]; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1698 | int n; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1699 |  | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1700 | /* | 
|  | 1701 | * Return end-of-file for a read on a counter that is in | 
|  | 1702 | * error state (i.e. because it was pinned but it couldn't be | 
|  | 1703 | * scheduled on to the CPU at some point). | 
|  | 1704 | */ | 
|  | 1705 | if (counter->state == PERF_COUNTER_STATE_ERROR) | 
|  | 1706 | return 0; | 
|  | 1707 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 1708 | WARN_ON_ONCE(counter->ctx->parent_ctx); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 1709 | mutex_lock(&counter->child_mutex); | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1710 | values[0] = perf_counter_read(counter); | 
|  | 1711 | n = 1; | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1712 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1713 | values[n++] = counter->total_time_enabled + | 
|  | 1714 | atomic64_read(&counter->child_total_time_enabled); | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1715 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1716 | values[n++] = counter->total_time_running + | 
|  | 1717 | atomic64_read(&counter->child_total_time_running); | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1718 | if (counter->attr.read_format & PERF_FORMAT_ID) | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 1719 | values[n++] = primary_counter_id(counter); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 1720 | mutex_unlock(&counter->child_mutex); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1721 |  | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1722 | if (count < n * sizeof(u64)) | 
|  | 1723 | return -EINVAL; | 
|  | 1724 | count = n * sizeof(u64); | 
|  | 1725 |  | 
|  | 1726 | if (copy_to_user(buf, values, count)) | 
|  | 1727 | return -EFAULT; | 
|  | 1728 |  | 
|  | 1729 | return count; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1730 | } | 
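For reference, the layout a read() produces here, directly mirroring the code above:

	/*
	 * u64 value;		always present
	 * u64 time_enabled;	if PERF_FORMAT_TOTAL_TIME_ENABLED
	 * u64 time_running;	if PERF_FORMAT_TOTAL_TIME_RUNNING
	 * u64 id;		if PERF_FORMAT_ID
	 *
	 * a buffer smaller than the selected fields gets -EINVAL, as the
	 * `count < n * sizeof(u64)` check shows.
	 */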
|  | 1731 |  | 
|  | 1732 | static ssize_t | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1733 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 
|  | 1734 | { | 
|  | 1735 | struct perf_counter *counter = file->private_data; | 
|  | 1736 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1737 | return perf_read_hw(counter, buf, count); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1738 | } | 
|  | 1739 |  | 
|  | 1740 | static unsigned int perf_poll(struct file *file, poll_table *wait) | 
|  | 1741 | { | 
|  | 1742 | struct perf_counter *counter = file->private_data; | 
| Peter Zijlstra | c7138f3 | 2009-03-24 13:18:16 +0100 | [diff] [blame] | 1743 | struct perf_mmap_data *data; | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 1744 | unsigned int events = POLLHUP; | 
| Peter Zijlstra | c7138f3 | 2009-03-24 13:18:16 +0100 | [diff] [blame] | 1745 |  | 
|  | 1746 | rcu_read_lock(); | 
|  | 1747 | data = rcu_dereference(counter->data); | 
|  | 1748 | if (data) | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 1749 | events = atomic_xchg(&data->poll, 0); | 
| Peter Zijlstra | c7138f3 | 2009-03-24 13:18:16 +0100 | [diff] [blame] | 1750 | rcu_read_unlock(); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1751 |  | 
|  | 1752 | poll_wait(file, &counter->waitq, wait); | 
|  | 1753 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1754 | return events; | 
|  | 1755 | } | 
|  | 1756 |  | 
| Peter Zijlstra | 6de6a7b | 2009-05-05 17:50:23 +0200 | [diff] [blame] | 1757 | static void perf_counter_reset(struct perf_counter *counter) | 
|  | 1758 | { | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1759 | (void)perf_counter_read(counter); | 
| Paul Mackerras | 615a3f1 | 2009-05-11 15:50:21 +1000 | [diff] [blame] | 1760 | atomic64_set(&counter->count, 0); | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1761 | perf_counter_update_userpage(counter); | 
|  | 1762 | } | 
|  | 1763 |  | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 1764 | /* | 
|  | 1765 | * Holding the top-level counter's child_mutex means that any | 
|  | 1766 | * descendant process that has inherited this counter will block | 
|  | 1767 | * in sync_child_counter if it goes to exit, thus satisfying the | 
|  | 1768 | * task existence requirements of perf_counter_enable/disable. | 
|  | 1769 | */ | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1770 | static void perf_counter_for_each_child(struct perf_counter *counter, | 
|  | 1771 | void (*func)(struct perf_counter *)) | 
|  | 1772 | { | 
|  | 1773 | struct perf_counter *child; | 
|  | 1774 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 1775 | WARN_ON_ONCE(counter->ctx->parent_ctx); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 1776 | mutex_lock(&counter->child_mutex); | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1777 | func(counter); | 
|  | 1778 | list_for_each_entry(child, &counter->child_list, child_list) | 
|  | 1779 | func(child); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 1780 | mutex_unlock(&counter->child_mutex); | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1781 | } | 
|  | 1782 |  | 
|  | 1783 | static void perf_counter_for_each(struct perf_counter *counter, | 
|  | 1784 | void (*func)(struct perf_counter *)) | 
|  | 1785 | { | 
| Peter Zijlstra | 75f937f | 2009-06-15 15:05:12 +0200 | [diff] [blame] | 1786 | struct perf_counter_context *ctx = counter->ctx; | 
|  | 1787 | struct perf_counter *sibling; | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1788 |  | 
| Peter Zijlstra | 75f937f | 2009-06-15 15:05:12 +0200 | [diff] [blame] | 1789 | WARN_ON_ONCE(ctx->parent_ctx); | 
|  | 1790 | mutex_lock(&ctx->mutex); | 
|  | 1791 | counter = counter->group_leader; | 
|  | 1792 |  | 
|  | 1793 | perf_counter_for_each_child(counter, func); | 
|  | 1794 | func(counter); | 
|  | 1795 | list_for_each_entry(sibling, &counter->sibling_list, list_entry) | 
|  | 1796 | perf_counter_for_each_child(sibling, func); | 
|  | 1797 | mutex_unlock(&ctx->mutex); | 
| Peter Zijlstra | 6de6a7b | 2009-05-05 17:50:23 +0200 | [diff] [blame] | 1798 | } | 
|  | 1799 |  | 
| Peter Zijlstra | 08247e3 | 2009-06-02 16:46:57 +0200 | [diff] [blame] | 1800 | static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) | 
|  | 1801 | { | 
|  | 1802 | struct perf_counter_context *ctx = counter->ctx; | 
|  | 1803 | unsigned long size; | 
|  | 1804 | int ret = 0; | 
|  | 1805 | u64 value; | 
|  | 1806 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1807 | if (!counter->attr.sample_period) | 
| Peter Zijlstra | 08247e3 | 2009-06-02 16:46:57 +0200 | [diff] [blame] | 1808 | return -EINVAL; | 
|  | 1809 |  | 
|  | 1810 | size = copy_from_user(&value, arg, sizeof(value)); | 
|  | 1811 | if (size != sizeof(value)) | 
|  | 1812 | return -EFAULT; | 
|  | 1813 |  | 
|  | 1814 | if (!value) | 
|  | 1815 | return -EINVAL; | 
|  | 1816 |  | 
|  | 1817 | spin_lock_irq(&ctx->lock); | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1818 | if (counter->attr.freq) { | 
| Peter Zijlstra | df58ab2 | 2009-06-11 11:25:05 +0200 | [diff] [blame] | 1819 | if (value > sysctl_perf_counter_sample_rate) { | 
| Peter Zijlstra | 08247e3 | 2009-06-02 16:46:57 +0200 | [diff] [blame] | 1820 | ret = -EINVAL; | 
|  | 1821 | goto unlock; | 
|  | 1822 | } | 
|  | 1823 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1824 | counter->attr.sample_freq = value; | 
| Peter Zijlstra | 08247e3 | 2009-06-02 16:46:57 +0200 | [diff] [blame] | 1825 | } else { | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 1826 | counter->attr.sample_period = value; | 
| Peter Zijlstra | 08247e3 | 2009-06-02 16:46:57 +0200 | [diff] [blame] | 1827 | counter->hw.sample_period = value; | 
| Peter Zijlstra | 08247e3 | 2009-06-02 16:46:57 +0200 | [diff] [blame] | 1828 | } | 
|  | 1829 | unlock: | 
|  | 1830 | spin_unlock_irq(&ctx->lock); | 
|  | 1831 |  | 
|  | 1832 | return ret; | 
|  | 1833 | } | 
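A hedged usage sketch; note that, unlike the other counter ioctls here, the argument is a pointer to a u64 rather than an immediate value:

	u64 new_period = 100000;

	if (ioctl(fd, PERF_COUNTER_IOC_PERIOD, &new_period) < 0)
		perror("PERF_COUNTER_IOC_PERIOD");

	/* for attr.freq counters the same value is taken as a new
	 * sample_freq, capped by sysctl_perf_counter_sample_rate */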
|  | 1834 |  | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1835 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 
|  | 1836 | { | 
|  | 1837 | struct perf_counter *counter = file->private_data; | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1838 | void (*func)(struct perf_counter *); | 
|  | 1839 | u32 flags = arg; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1840 |  | 
|  | 1841 | switch (cmd) { | 
|  | 1842 | case PERF_COUNTER_IOC_ENABLE: | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1843 | func = perf_counter_enable; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1844 | break; | 
|  | 1845 | case PERF_COUNTER_IOC_DISABLE: | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1846 | func = perf_counter_disable; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 1847 | break; | 
| Peter Zijlstra | 6de6a7b | 2009-05-05 17:50:23 +0200 | [diff] [blame] | 1848 | case PERF_COUNTER_IOC_RESET: | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1849 | func = perf_counter_reset; | 
| Peter Zijlstra | 6de6a7b | 2009-05-05 17:50:23 +0200 | [diff] [blame] | 1850 | break; | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1851 |  | 
|  | 1852 | case PERF_COUNTER_IOC_REFRESH: | 
|  | 1853 | return perf_counter_refresh(counter, arg); | 
| Peter Zijlstra | 08247e3 | 2009-06-02 16:46:57 +0200 | [diff] [blame] | 1854 |  | 
|  | 1855 | case PERF_COUNTER_IOC_PERIOD: | 
|  | 1856 | return perf_counter_period(counter, (u64 __user *)arg); | 
|  | 1857 |  | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1858 | default: | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1859 | return -ENOTTY; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1860 | } | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 1861 |  | 
|  | 1862 | if (flags & PERF_IOC_FLAG_GROUP) | 
|  | 1863 | perf_counter_for_each(counter, func); | 
|  | 1864 | else | 
|  | 1865 | perf_counter_for_each_child(counter, func); | 
|  | 1866 |  | 
|  | 1867 | return 0; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1868 | } | 
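Concretely, the flag handling above distinguishes two illustrative calls:

	/*
	 * ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);
	 *	enables this counter and its inherited children only;
	 * ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	 *	walks the whole sibling group via perf_counter_for_each().
	 */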
|  | 1869 |  | 
| Peter Zijlstra | 771d7cd | 2009-05-25 14:45:26 +0200 | [diff] [blame] | 1870 | int perf_counter_task_enable(void) | 
|  | 1871 | { | 
|  | 1872 | struct perf_counter *counter; | 
|  | 1873 |  | 
|  | 1874 | mutex_lock(¤t->perf_counter_mutex); | 
|  | 1875 | list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) | 
|  | 1876 | perf_counter_for_each_child(counter, perf_counter_enable); | 
|  | 1877 | mutex_unlock(¤t->perf_counter_mutex); | 
|  | 1878 |  | 
|  | 1879 | return 0; | 
|  | 1880 | } | 
|  | 1881 |  | 
|  | 1882 | int perf_counter_task_disable(void) | 
|  | 1883 | { | 
|  | 1884 | struct perf_counter *counter; | 
|  | 1885 |  | 
|  | 1886 | mutex_lock(¤t->perf_counter_mutex); | 
|  | 1887 | list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) | 
|  | 1888 | perf_counter_for_each_child(counter, perf_counter_disable); | 
|  | 1889 | mutex_unlock(¤t->perf_counter_mutex); | 
|  | 1890 |  | 
|  | 1891 | return 0; | 
|  | 1892 | } | 
|  | 1893 |  | 
| Peter Zijlstra | 194002b | 2009-06-22 16:35:24 +0200 | [diff] [blame] | 1894 | static int perf_counter_index(struct perf_counter *counter) | 
|  | 1895 | { | 
|  | 1896 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | 
|  | 1897 | return 0; | 
|  | 1898 |  | 
|  | 1899 | return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; | 
|  | 1900 | } | 
|  | 1901 |  | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 1902 | /* | 
|  | 1903 | * Callers need to ensure there can be no nesting of this function, otherwise | 
|  | 1904 | * the seqlock logic goes bad. We can not serialize this because the arch | 
|  | 1905 | * code calls this from NMI context. | 
|  | 1906 | */ | 
|  | 1907 | void perf_counter_update_userpage(struct perf_counter *counter) | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1908 | { | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 1909 | struct perf_counter_mmap_page *userpg; | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 1910 | struct perf_mmap_data *data; | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 1911 |  | 
|  | 1912 | rcu_read_lock(); | 
|  | 1913 | data = rcu_dereference(counter->data); | 
|  | 1914 | if (!data) | 
|  | 1915 | goto unlock; | 
|  | 1916 |  | 
|  | 1917 | userpg = data->user_page; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1918 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1919 | /* | 
|  | 1920 | * Disable preemption so as to not let the corresponding user-space | 
|  | 1921 | * spin too long if we get preempted. | 
|  | 1922 | */ | 
|  | 1923 | preempt_disable(); | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1924 | ++userpg->lock; | 
| Peter Zijlstra | 92f22a3 | 2009-04-02 11:12:04 +0200 | [diff] [blame] | 1925 | barrier(); | 
| Peter Zijlstra | 194002b | 2009-06-22 16:35:24 +0200 | [diff] [blame] | 1926 | userpg->index = perf_counter_index(counter); | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1927 | userpg->offset = atomic64_read(&counter->count); | 
|  | 1928 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | 
|  | 1929 | userpg->offset -= atomic64_read(&counter->hw.prev_count); | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1930 |  | 
| Peter Zijlstra | 7f8b4e4 | 2009-06-22 14:34:35 +0200 | [diff] [blame] | 1931 | userpg->time_enabled = counter->total_time_enabled + | 
|  | 1932 | atomic64_read(&counter->child_total_time_enabled); | 
|  | 1933 |  | 
|  | 1934 | userpg->time_running = counter->total_time_running + | 
|  | 1935 | atomic64_read(&counter->child_total_time_running); | 
|  | 1936 |  | 
| Peter Zijlstra | 92f22a3 | 2009-04-02 11:12:04 +0200 | [diff] [blame] | 1937 | barrier(); | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1938 | ++userpg->lock; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1939 | preempt_enable(); | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 1940 | unlock: | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1941 | rcu_read_unlock(); | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1942 | } | 
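/*
 * Illustrative sketch (not part of this file) of the user-space read side
 * the update protocol above assumes: ->lock behaves like a seqcount, odd
 * while an update is in flight, so readers retry when it changed or was
 * odd.  Names such as "pc" are placeholders.
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index  = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq || (seq & 1));
 */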
|  | 1943 |  | 
|  | 1944 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
|  | 1945 | { | 
|  | 1946 | struct perf_counter *counter = vma->vm_file->private_data; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1947 | struct perf_mmap_data *data; | 
|  | 1948 | int ret = VM_FAULT_SIGBUS; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1949 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 1950 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | 
|  | 1951 | if (vmf->pgoff == 0) | 
|  | 1952 | ret = 0; | 
|  | 1953 | return ret; | 
|  | 1954 | } | 
|  | 1955 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1956 | rcu_read_lock(); | 
|  | 1957 | data = rcu_dereference(counter->data); | 
|  | 1958 | if (!data) | 
|  | 1959 | goto unlock; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1960 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1961 | if (vmf->pgoff == 0) { | 
|  | 1962 | vmf->page = virt_to_page(data->user_page); | 
|  | 1963 | } else { | 
|  | 1964 | int nr = vmf->pgoff - 1; | 
|  | 1965 |  | 
|  | 1966 | if ((unsigned)nr > data->nr_pages) | 
|  | 1967 | goto unlock; | 
|  | 1968 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 1969 | if (vmf->flags & FAULT_FLAG_WRITE) | 
|  | 1970 | goto unlock; | 
|  | 1971 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1972 | vmf->page = virt_to_page(data->data_pages[nr]); | 
|  | 1973 | } | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 1974 |  | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 1975 | get_page(vmf->page); | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 1976 | vmf->page->mapping = vma->vm_file->f_mapping; | 
|  | 1977 | vmf->page->index   = vmf->pgoff; | 
|  | 1978 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 1979 | ret = 0; | 
|  | 1980 | unlock: | 
|  | 1981 | rcu_read_unlock(); | 
|  | 1982 |  | 
|  | 1983 | return ret; | 
|  | 1984 | } | 
|  | 1985 |  | 
|  | 1986 | static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) | 
|  | 1987 | { | 
|  | 1988 | struct perf_mmap_data *data; | 
|  | 1989 | unsigned long size; | 
|  | 1990 | int i; | 
|  | 1991 |  | 
|  | 1992 | WARN_ON(atomic_read(&counter->mmap_count)); | 
|  | 1993 |  | 
|  | 1994 | size = sizeof(struct perf_mmap_data); | 
|  | 1995 | size += nr_pages * sizeof(void *); | 
|  | 1996 |  | 
|  | 1997 | data = kzalloc(size, GFP_KERNEL); | 
|  | 1998 | if (!data) | 
|  | 1999 | goto fail; | 
|  | 2000 |  | 
|  | 2001 | data->user_page = (void *)get_zeroed_page(GFP_KERNEL); | 
|  | 2002 | if (!data->user_page) | 
|  | 2003 | goto fail_user_page; | 
|  | 2004 |  | 
|  | 2005 | for (i = 0; i < nr_pages; i++) { | 
|  | 2006 | data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); | 
|  | 2007 | if (!data->data_pages[i]) | 
|  | 2008 | goto fail_data_pages; | 
|  | 2009 | } | 
|  | 2010 |  | 
|  | 2011 | data->nr_pages = nr_pages; | 
| Peter Zijlstra | 22c1558 | 2009-05-05 17:50:25 +0200 | [diff] [blame] | 2012 | atomic_set(&data->lock, -1); | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2013 |  | 
|  | 2014 | rcu_assign_pointer(counter->data, data); | 
|  | 2015 |  | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2016 | return 0; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2017 |  | 
|  | 2018 | fail_data_pages: | 
|  | 2019 | for (i--; i >= 0; i--) | 
|  | 2020 | free_page((unsigned long)data->data_pages[i]); | 
|  | 2021 |  | 
|  | 2022 | free_page((unsigned long)data->user_page); | 
|  | 2023 |  | 
|  | 2024 | fail_user_page: | 
|  | 2025 | kfree(data); | 
|  | 2026 |  | 
|  | 2027 | fail: | 
|  | 2028 | return -ENOMEM; | 
|  | 2029 | } | 
|  | 2030 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2031 | static void perf_mmap_free_page(unsigned long addr) | 
|  | 2032 | { | 
| Kevin Cernekee | 5bfd756 | 2009-07-05 12:08:19 -0700 | [diff] [blame] | 2033 | struct page *page = virt_to_page((void *)addr); | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2034 |  | 
|  | 2035 | page->mapping = NULL; | 
|  | 2036 | __free_page(page); | 
|  | 2037 | } | 
|  | 2038 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2039 | static void __perf_mmap_data_free(struct rcu_head *rcu_head) | 
|  | 2040 | { | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 2041 | struct perf_mmap_data *data; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2042 | int i; | 
|  | 2043 |  | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 2044 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | 
|  | 2045 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2046 | perf_mmap_free_page((unsigned long)data->user_page); | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2047 | for (i = 0; i < data->nr_pages; i++) | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2048 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | 
|  | 2049 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2050 | kfree(data); | 
|  | 2051 | } | 
|  | 2052 |  | 
|  | 2053 | static void perf_mmap_data_free(struct perf_counter *counter) | 
|  | 2054 | { | 
|  | 2055 | struct perf_mmap_data *data = counter->data; | 
|  | 2056 |  | 
|  | 2057 | WARN_ON(atomic_read(&counter->mmap_count)); | 
|  | 2058 |  | 
|  | 2059 | rcu_assign_pointer(counter->data, NULL); | 
|  | 2060 | call_rcu(&data->rcu_head, __perf_mmap_data_free); | 
|  | 2061 | } | 
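/*
 * Note: the buffer is torn down in two steps; the pointer is cleared first
 * and the pages are only freed after an RCU grace period, so the fault and
 * output paths that did rcu_dereference(counter->data) keep seeing valid
 * pages until they drop rcu_read_lock().
 */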
|  | 2062 |  | 
|  | 2063 | static void perf_mmap_open(struct vm_area_struct *vma) | 
|  | 2064 | { | 
|  | 2065 | struct perf_counter *counter = vma->vm_file->private_data; | 
|  | 2066 |  | 
|  | 2067 | atomic_inc(&counter->mmap_count); | 
|  | 2068 | } | 
|  | 2069 |  | 
|  | 2070 | static void perf_mmap_close(struct vm_area_struct *vma) | 
|  | 2071 | { | 
|  | 2072 | struct perf_counter *counter = vma->vm_file->private_data; | 
|  | 2073 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 2074 | WARN_ON_ONCE(counter->ctx->parent_ctx); | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 2075 | if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2076 | struct user_struct *user = current_user(); | 
|  | 2077 |  | 
|  | 2078 | atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); | 
| Peter Zijlstra | c5078f7 | 2009-05-05 17:50:24 +0200 | [diff] [blame] | 2079 | vma->vm_mm->locked_vm -= counter->data->nr_locked; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2080 | perf_mmap_data_free(counter); | 
|  | 2081 | mutex_unlock(&counter->mmap_mutex); | 
|  | 2082 | } | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2083 | } | 
|  | 2084 |  | 
|  | 2085 | static struct vm_operations_struct perf_mmap_vmops = { | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2086 | .open		= perf_mmap_open, | 
|  | 2087 | .close		= perf_mmap_close, | 
|  | 2088 | .fault		= perf_mmap_fault, | 
|  | 2089 | .page_mkwrite	= perf_mmap_fault, | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2090 | }; | 
|  | 2091 |  | 
|  | 2092 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) | 
|  | 2093 | { | 
|  | 2094 | struct perf_counter *counter = file->private_data; | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 2095 | unsigned long user_locked, user_lock_limit; | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2096 | struct user_struct *user = current_user(); | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 2097 | unsigned long locked, lock_limit; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2098 | unsigned long vma_size; | 
|  | 2099 | unsigned long nr_pages; | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2100 | long user_extra, extra; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2101 | int ret = 0; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2102 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2103 | if (!(vma->vm_flags & VM_SHARED)) | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2104 | return -EINVAL; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2105 |  | 
|  | 2106 | vma_size = vma->vm_end - vma->vm_start; | 
|  | 2107 | nr_pages = (vma_size / PAGE_SIZE) - 1; | 
|  | 2108 |  | 
| Peter Zijlstra | 7730d86 | 2009-03-25 12:48:31 +0100 | [diff] [blame] | 2109 | /* | 
|  | 2110 | * If we have data pages ensure they're a power-of-two number, so we | 
|  | 2111 | * can do bitmasks instead of modulo. | 
|  | 2112 | */ | 
|  | 2113 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2114 | return -EINVAL; | 
|  | 2115 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2116 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2117 | return -EINVAL; | 
|  | 2118 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2119 | if (vma->vm_pgoff != 0) | 
|  | 2120 | return -EINVAL; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2121 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 2122 | WARN_ON_ONCE(counter->ctx->parent_ctx); | 
| Peter Zijlstra | ebb3c4c | 2009-04-06 11:45:05 +0200 | [diff] [blame] | 2123 | mutex_lock(&counter->mmap_mutex); | 
|  | 2124 | if (atomic_inc_not_zero(&counter->mmap_count)) { | 
|  | 2125 | if (nr_pages != counter->data->nr_pages) | 
|  | 2126 | ret = -EINVAL; | 
|  | 2127 | goto unlock; | 
|  | 2128 | } | 
|  | 2129 |  | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2130 | user_extra = nr_pages + 1; | 
|  | 2131 | user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); | 
| Ingo Molnar | a3862d3 | 2009-05-24 09:02:37 +0200 | [diff] [blame] | 2132 |  | 
|  | 2133 | /* | 
|  | 2134 | * Increase the limit linearly with more CPUs: | 
|  | 2135 | */ | 
|  | 2136 | user_lock_limit *= num_online_cpus(); | 
|  | 2137 |  | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2138 | user_locked = atomic_long_read(&user->locked_vm) + user_extra; | 
| Peter Zijlstra | c5078f7 | 2009-05-05 17:50:24 +0200 | [diff] [blame] | 2139 |  | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2140 | extra = 0; | 
|  | 2141 | if (user_locked > user_lock_limit) | 
|  | 2142 | extra = user_locked - user_lock_limit; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2143 |  | 
|  | 2144 | lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; | 
|  | 2145 | lock_limit >>= PAGE_SHIFT; | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2146 | locked = vma->vm_mm->locked_vm + extra; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2147 |  | 
| Peter Zijlstra | ebb3c4c | 2009-04-06 11:45:05 +0200 | [diff] [blame] | 2148 | if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { | 
|  | 2149 | ret = -EPERM; | 
|  | 2150 | goto unlock; | 
|  | 2151 | } | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2152 |  | 
|  | 2153 | WARN_ON(counter->data); | 
|  | 2154 | ret = perf_mmap_data_alloc(counter, nr_pages); | 
| Peter Zijlstra | ebb3c4c | 2009-04-06 11:45:05 +0200 | [diff] [blame] | 2155 | if (ret) | 
|  | 2156 | goto unlock; | 
|  | 2157 |  | 
|  | 2158 | atomic_set(&counter->mmap_count, 1); | 
| Peter Zijlstra | 789f90f | 2009-05-15 15:19:27 +0200 | [diff] [blame] | 2159 | atomic_long_add(user_extra, &user->locked_vm); | 
| Peter Zijlstra | c5078f7 | 2009-05-05 17:50:24 +0200 | [diff] [blame] | 2160 | vma->vm_mm->locked_vm += extra; | 
|  | 2161 | counter->data->nr_locked = extra; | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2162 | if (vma->vm_flags & VM_WRITE) | 
|  | 2163 | counter->data->writable = 1; | 
|  | 2164 |  | 
| Peter Zijlstra | ebb3c4c | 2009-04-06 11:45:05 +0200 | [diff] [blame] | 2165 | unlock: | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2166 | mutex_unlock(&counter->mmap_mutex); | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2167 |  | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2168 | vma->vm_flags |= VM_RESERVED; | 
|  | 2169 | vma->vm_ops = &perf_mmap_vmops; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2170 |  | 
|  | 2171 | return ret; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2172 | } | 
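/*
 * Illustrative user-space mapping matching the checks above (sketch, not
 * part of this file): one metadata page plus a power-of-two number of data
 * pages, mapped shared at offset 0.  "page_size" and "counter_fd" are
 * placeholders for the system page size and the counter file descriptor.
 * PROT_WRITE is what makes the buffer writable (data_tail feedback) via
 * the VM_WRITE test above.
 *
 *	size_t len = (1 + 8) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, counter_fd, 0);
 */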
|  | 2173 |  | 
| Peter Zijlstra | 3c446b3 | 2009-04-06 11:45:01 +0200 | [diff] [blame] | 2174 | static int perf_fasync(int fd, struct file *filp, int on) | 
|  | 2175 | { | 
| Peter Zijlstra | 3c446b3 | 2009-04-06 11:45:01 +0200 | [diff] [blame] | 2176 | struct inode *inode = filp->f_path.dentry->d_inode; | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 2177 | struct perf_counter *counter = filp->private_data; | 
| Peter Zijlstra | 3c446b3 | 2009-04-06 11:45:01 +0200 | [diff] [blame] | 2178 | int retval; | 
|  | 2179 |  | 
|  | 2180 | mutex_lock(&inode->i_mutex); | 
|  | 2181 | retval = fasync_helper(fd, filp, on, &counter->fasync); | 
|  | 2182 | mutex_unlock(&inode->i_mutex); | 
|  | 2183 |  | 
|  | 2184 | if (retval < 0) | 
|  | 2185 | return retval; | 
|  | 2186 |  | 
|  | 2187 | return 0; | 
|  | 2188 | } | 
|  | 2189 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 2190 | static const struct file_operations perf_fops = { | 
|  | 2191 | .release		= perf_release, | 
|  | 2192 | .read			= perf_read, | 
|  | 2193 | .poll			= perf_poll, | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 2194 | .unlocked_ioctl		= perf_ioctl, | 
|  | 2195 | .compat_ioctl		= perf_ioctl, | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 2196 | .mmap			= perf_mmap, | 
| Peter Zijlstra | 3c446b3 | 2009-04-06 11:45:01 +0200 | [diff] [blame] | 2197 | .fasync			= perf_fasync, | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 2198 | }; | 
|  | 2199 |  | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 2200 | /* | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2201 | * Perf counter wakeup | 
|  | 2202 | * | 
|  | 2203 | * If there's data, ensure we set the poll() state and publish everything | 
|  | 2204 | * to user-space before waking everybody up. | 
|  | 2205 | */ | 
|  | 2206 |  | 
|  | 2207 | void perf_counter_wakeup(struct perf_counter *counter) | 
|  | 2208 | { | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2209 | wake_up_all(&counter->waitq); | 
| Peter Zijlstra | 4c9e254 | 2009-04-06 11:45:09 +0200 | [diff] [blame] | 2210 |  | 
|  | 2211 | if (counter->pending_kill) { | 
|  | 2212 | kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); | 
|  | 2213 | counter->pending_kill = 0; | 
|  | 2214 | } | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2215 | } | 
|  | 2216 |  | 
|  | 2217 | /* | 
|  | 2218 | * Pending wakeups | 
|  | 2219 | * | 
|  | 2220 | * Handle the case where we need to wake up from NMI (or rq->lock) context. | 
|  | 2221 | * | 
|  | 2222 | * The NMI bit means we cannot possibly take locks. Therefore, maintain a | 
|  | 2223 | * single linked list and use cmpxchg() to add entries lockless. | 
|  | 2224 | */ | 
|  | 2225 |  | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 2226 | static void perf_pending_counter(struct perf_pending_entry *entry) | 
|  | 2227 | { | 
|  | 2228 | struct perf_counter *counter = container_of(entry, | 
|  | 2229 | struct perf_counter, pending); | 
|  | 2230 |  | 
|  | 2231 | if (counter->pending_disable) { | 
|  | 2232 | counter->pending_disable = 0; | 
|  | 2233 | perf_counter_disable(counter); | 
|  | 2234 | } | 
|  | 2235 |  | 
|  | 2236 | if (counter->pending_wakeup) { | 
|  | 2237 | counter->pending_wakeup = 0; | 
|  | 2238 | perf_counter_wakeup(counter); | 
|  | 2239 | } | 
|  | 2240 | } | 
|  | 2241 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2242 | #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2243 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2244 | static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2245 | PENDING_TAIL, | 
|  | 2246 | }; | 
|  | 2247 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2248 | static void perf_pending_queue(struct perf_pending_entry *entry, | 
|  | 2249 | void (*func)(struct perf_pending_entry *)) | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2250 | { | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2251 | struct perf_pending_entry **head; | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2252 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2253 | if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2254 | return; | 
|  | 2255 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2256 | entry->func = func; | 
|  | 2257 |  | 
|  | 2258 | head = &get_cpu_var(perf_pending_head); | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2259 |  | 
|  | 2260 | do { | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2261 | entry->next = *head; | 
|  | 2262 | } while (cmpxchg(head, entry->next, entry) != entry->next); | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2263 |  | 
|  | 2264 | set_perf_counter_pending(); | 
|  | 2265 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2266 | put_cpu_var(perf_pending_head); | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2267 | } | 
|  | 2268 |  | 
|  | 2269 | static int __perf_pending_run(void) | 
|  | 2270 | { | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2271 | struct perf_pending_entry *list; | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2272 | int nr = 0; | 
|  | 2273 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2274 | list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2275 | while (list != PENDING_TAIL) { | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2276 | void (*func)(struct perf_pending_entry *); | 
|  | 2277 | struct perf_pending_entry *entry = list; | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2278 |  | 
|  | 2279 | list = list->next; | 
|  | 2280 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2281 | func = entry->func; | 
|  | 2282 | entry->next = NULL; | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2283 | /* | 
|  | 2284 | * Ensure we observe the unqueue before we issue the wakeup, | 
|  | 2285 | * so that we won't be waiting forever. | 
|  | 2286 | * -- see perf_not_pending(). | 
|  | 2287 | */ | 
|  | 2288 | smp_wmb(); | 
|  | 2289 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2290 | func(entry); | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2291 | nr++; | 
|  | 2292 | } | 
|  | 2293 |  | 
|  | 2294 | return nr; | 
|  | 2295 | } | 
|  | 2296 |  | 
|  | 2297 | static inline int perf_not_pending(struct perf_counter *counter) | 
|  | 2298 | { | 
|  | 2299 | /* | 
|  | 2300 | * If we flush on whatever cpu we run, there is a chance we don't | 
|  | 2301 | * need to wait. | 
|  | 2302 | */ | 
|  | 2303 | get_cpu(); | 
|  | 2304 | __perf_pending_run(); | 
|  | 2305 | put_cpu(); | 
|  | 2306 |  | 
|  | 2307 | /* | 
|  | 2308 | * Ensure we see the proper queue state before going to sleep | 
|  | 2309 | * so that we do not miss the wakeup. -- see __perf_pending_run() | 
|  | 2310 | */ | 
|  | 2311 | smp_rmb(); | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2312 | return counter->pending.next == NULL; | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 2313 | } | 
|  | 2314 |  | 
|  | 2315 | static void perf_pending_sync(struct perf_counter *counter) | 
|  | 2316 | { | 
|  | 2317 | wait_event(counter->waitq, perf_not_pending(counter)); | 
|  | 2318 | } | 
|  | 2319 |  | 
|  | 2320 | void perf_counter_do_pending(void) | 
|  | 2321 | { | 
|  | 2322 | __perf_pending_run(); | 
|  | 2323 | } | 
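/*
 * Note on the list encoding used above: entry->next == NULL means "not
 * queued" (which is what the cmpxchg in perf_pending_queue() tests),
 * PENDING_TAIL terminates the per-cpu list, and __perf_pending_run()
 * resets ->next to NULL so the entry can be queued again.
 */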
|  | 2324 |  | 
|  | 2325 | /* | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 2326 | * Callchain support -- arch specific | 
|  | 2327 | */ | 
|  | 2328 |  | 
| Peter Zijlstra | 9c03d88 | 2009-04-06 11:45:00 +0200 | [diff] [blame] | 2329 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 2330 | { | 
|  | 2331 | return NULL; | 
|  | 2332 | } | 
|  | 2333 |  | 
|  | 2334 | /* | 
| Peter Zijlstra | 0322cd6 | 2009-03-19 20:26:19 +0100 | [diff] [blame] | 2335 | * Output | 
|  | 2336 | */ | 
|  | 2337 |  | 
| Peter Zijlstra | b9cacc7 | 2009-03-25 12:30:22 +0100 | [diff] [blame] | 2338 | struct perf_output_handle { | 
|  | 2339 | struct perf_counter	*counter; | 
|  | 2340 | struct perf_mmap_data	*data; | 
| Peter Zijlstra | 8e3747c | 2009-06-02 16:16:02 +0200 | [diff] [blame] | 2341 | unsigned long		head; | 
|  | 2342 | unsigned long		offset; | 
| Peter Zijlstra | 78d613e | 2009-03-30 19:07:11 +0200 | [diff] [blame] | 2343 | int			nmi; | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2344 | int			sample; | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2345 | int			locked; | 
|  | 2346 | unsigned long		flags; | 
| Peter Zijlstra | b9cacc7 | 2009-03-25 12:30:22 +0100 | [diff] [blame] | 2347 | }; | 
|  | 2348 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2349 | static bool perf_output_space(struct perf_mmap_data *data, | 
|  | 2350 | unsigned int offset, unsigned int head) | 
|  | 2351 | { | 
|  | 2352 | unsigned long tail; | 
|  | 2353 | unsigned long mask; | 
|  | 2354 |  | 
|  | 2355 | if (!data->writable) | 
|  | 2356 | return true; | 
|  | 2357 |  | 
|  | 2358 | mask = (data->nr_pages << PAGE_SHIFT) - 1; | 
|  | 2359 | /* | 
|  | 2360 | * Userspace could choose to issue a mb() before updating the tail | 
|  | 2361 | * pointer, so that all reads are completed before the write is | 
|  | 2362 | * issued. | 
|  | 2363 | */ | 
|  | 2364 | tail = ACCESS_ONCE(data->user_page->data_tail); | 
|  | 2365 | smp_rmb(); | 
|  | 2366 |  | 
|  | 2367 | offset = (offset - tail) & mask; | 
|  | 2368 | head   = (head   - tail) & mask; | 
|  | 2369 |  | 
|  | 2370 | if ((int)(head - offset) < 0) | 
|  | 2371 | return false; | 
|  | 2372 |  | 
|  | 2373 | return true; | 
|  | 2374 | } | 
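/*
 * Worked example of the check above (illustrative numbers): with 4 data
 * pages of 4096 bytes the mask is 16383.  For tail = 100, offset = 16000
 * and head = 17000, (offset - tail) & mask = 15900 but
 * (head - tail) & mask = 516, so head - offset goes negative and the
 * write is rejected instead of overwriting data user-space has not
 * consumed yet.
 */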
|  | 2375 |  | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2376 | static void perf_output_wakeup(struct perf_output_handle *handle) | 
| Peter Zijlstra | 78d613e | 2009-03-30 19:07:11 +0200 | [diff] [blame] | 2377 | { | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2378 | atomic_set(&handle->data->poll, POLL_IN); | 
|  | 2379 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2380 | if (handle->nmi) { | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 2381 | handle->counter->pending_wakeup = 1; | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2382 | perf_pending_queue(&handle->counter->pending, | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 2383 | perf_pending_counter); | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 2384 | } else | 
| Peter Zijlstra | 78d613e | 2009-03-30 19:07:11 +0200 | [diff] [blame] | 2385 | perf_counter_wakeup(handle->counter); | 
|  | 2386 | } | 
|  | 2387 |  | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2388 | /* | 
|  | 2389 | * Curious locking construct. | 
|  | 2390 | * | 
|  | 2391 | * We need to ensure a later event doesn't publish a head when a former | 
|  | 2392 | * event isn't done writing. However since we need to deal with NMIs we | 
|  | 2393 | * cannot fully serialize things. | 
|  | 2394 | * | 
|  | 2395 | * What we do is serialize between CPUs so we only have to deal with NMI | 
|  | 2396 | * nesting on a single CPU. | 
|  | 2397 | * | 
|  | 2398 | * We only publish the head (and generate a wakeup) when the outer-most | 
|  | 2399 | * event completes. | 
|  | 2400 | */ | 
|  | 2401 | static void perf_output_lock(struct perf_output_handle *handle) | 
|  | 2402 | { | 
|  | 2403 | struct perf_mmap_data *data = handle->data; | 
|  | 2404 | int cpu; | 
|  | 2405 |  | 
|  | 2406 | handle->locked = 0; | 
|  | 2407 |  | 
|  | 2408 | local_irq_save(handle->flags); | 
|  | 2409 | cpu = smp_processor_id(); | 
|  | 2410 |  | 
|  | 2411 | if (in_nmi() && atomic_read(&data->lock) == cpu) | 
|  | 2412 | return; | 
|  | 2413 |  | 
| Peter Zijlstra | 22c1558 | 2009-05-05 17:50:25 +0200 | [diff] [blame] | 2414 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2415 | cpu_relax(); | 
|  | 2416 |  | 
|  | 2417 | handle->locked = 1; | 
|  | 2418 | } | 
|  | 2419 |  | 
|  | 2420 | static void perf_output_unlock(struct perf_output_handle *handle) | 
|  | 2421 | { | 
|  | 2422 | struct perf_mmap_data *data = handle->data; | 
| Peter Zijlstra | 8e3747c | 2009-06-02 16:16:02 +0200 | [diff] [blame] | 2423 | unsigned long head; | 
|  | 2424 | int cpu; | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2425 |  | 
| Peter Zijlstra | c66de4a | 2009-05-05 17:50:22 +0200 | [diff] [blame] | 2426 | data->done_head = data->head; | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2427 |  | 
|  | 2428 | if (!handle->locked) | 
|  | 2429 | goto out; | 
|  | 2430 |  | 
|  | 2431 | again: | 
|  | 2432 | /* | 
|  | 2433 | * The xchg implies a full barrier that ensures all writes are done | 
|  | 2434 | * before we publish the new head, matched by a rmb() in userspace when | 
|  | 2435 | * reading this position. | 
|  | 2436 | */ | 
| Peter Zijlstra | 8e3747c | 2009-06-02 16:16:02 +0200 | [diff] [blame] | 2437 | while ((head = atomic_long_xchg(&data->done_head, 0))) | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2438 | data->user_page->data_head = head; | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2439 |  | 
|  | 2440 | /* | 
| Peter Zijlstra | c66de4a | 2009-05-05 17:50:22 +0200 | [diff] [blame] | 2441 | * NMI can happen here, which means we can miss a done_head update. | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2442 | */ | 
|  | 2443 |  | 
| Peter Zijlstra | 22c1558 | 2009-05-05 17:50:25 +0200 | [diff] [blame] | 2444 | cpu = atomic_xchg(&data->lock, -1); | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2445 | WARN_ON_ONCE(cpu != smp_processor_id()); | 
|  | 2446 |  | 
|  | 2447 | /* | 
|  | 2448 | * Therefore we have to validate we did not indeed do so. | 
|  | 2449 | */ | 
| Peter Zijlstra | 8e3747c | 2009-06-02 16:16:02 +0200 | [diff] [blame] | 2450 | if (unlikely(atomic_long_read(&data->done_head))) { | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2451 | /* | 
|  | 2452 | * Since we had it locked, we can lock it again. | 
|  | 2453 | */ | 
| Peter Zijlstra | 22c1558 | 2009-05-05 17:50:25 +0200 | [diff] [blame] | 2454 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2455 | cpu_relax(); | 
|  | 2456 |  | 
|  | 2457 | goto again; | 
|  | 2458 | } | 
|  | 2459 |  | 
| Peter Zijlstra | c66de4a | 2009-05-05 17:50:22 +0200 | [diff] [blame] | 2460 | if (atomic_xchg(&data->wakeup, 0)) | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2461 | perf_output_wakeup(handle); | 
|  | 2462 | out: | 
|  | 2463 | local_irq_restore(handle->flags); | 
|  | 2464 | } | 
|  | 2465 |  | 
| Peter Zijlstra | b9cacc7 | 2009-03-25 12:30:22 +0100 | [diff] [blame] | 2466 | static void perf_output_copy(struct perf_output_handle *handle, | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 2467 | const void *buf, unsigned int len) | 
| Peter Zijlstra | b9cacc7 | 2009-03-25 12:30:22 +0100 | [diff] [blame] | 2468 | { | 
|  | 2469 | unsigned int pages_mask; | 
|  | 2470 | unsigned int offset; | 
|  | 2471 | unsigned int size; | 
|  | 2472 | void **pages; | 
|  | 2473 |  | 
|  | 2474 | offset		= handle->offset; | 
|  | 2475 | pages_mask	= handle->data->nr_pages - 1; | 
|  | 2476 | pages		= handle->data->data_pages; | 
|  | 2477 |  | 
|  | 2478 | do { | 
|  | 2479 | unsigned int page_offset; | 
|  | 2480 | int nr; | 
|  | 2481 |  | 
|  | 2482 | nr	    = (offset >> PAGE_SHIFT) & pages_mask; | 
|  | 2483 | page_offset = offset & (PAGE_SIZE - 1); | 
|  | 2484 | size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len); | 
|  | 2485 |  | 
|  | 2486 | memcpy(pages[nr] + page_offset, buf, size); | 
|  | 2487 |  | 
|  | 2488 | len	    -= size; | 
|  | 2489 | buf	    += size; | 
|  | 2490 | offset	    += size; | 
|  | 2491 | } while (len); | 
|  | 2492 |  | 
|  | 2493 | handle->offset = offset; | 
| Peter Zijlstra | 63e35b2 | 2009-03-25 12:30:24 +0100 | [diff] [blame] | 2494 |  | 
| Peter Zijlstra | 53020fe | 2009-05-13 21:26:19 +0200 | [diff] [blame] | 2495 | /* | 
|  | 2496 | * Check we didn't copy past our reservation window, taking the | 
|  | 2497 | * possible unsigned int wrap into account. | 
|  | 2498 | */ | 
| Peter Zijlstra | 8e3747c | 2009-06-02 16:16:02 +0200 | [diff] [blame] | 2499 | WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); | 
| Peter Zijlstra | b9cacc7 | 2009-03-25 12:30:22 +0100 | [diff] [blame] | 2500 | } | 
|  | 2501 |  | 
| Peter Zijlstra | 5c14819 | 2009-03-25 12:30:23 +0100 | [diff] [blame] | 2502 | #define perf_output_put(handle, x) \ | 
|  | 2503 | perf_output_copy((handle), &(x), sizeof(x)) | 
|  | 2504 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2505 | static int perf_output_begin(struct perf_output_handle *handle, | 
|  | 2506 | struct perf_counter *counter, unsigned int size, | 
|  | 2507 | int nmi, int sample) | 
|  | 2508 | { | 
|  | 2509 | struct perf_mmap_data *data; | 
|  | 2510 | unsigned int offset, head; | 
|  | 2511 | int have_lost; | 
|  | 2512 | struct { | 
|  | 2513 | struct perf_event_header header; | 
|  | 2514 | u64			 id; | 
|  | 2515 | u64			 lost; | 
|  | 2516 | } lost_event; | 
|  | 2517 |  | 
|  | 2518 | /* | 
|  | 2519 | * For inherited counters we send all the output towards the parent. | 
|  | 2520 | */ | 
|  | 2521 | if (counter->parent) | 
|  | 2522 | counter = counter->parent; | 
|  | 2523 |  | 
|  | 2524 | rcu_read_lock(); | 
|  | 2525 | data = rcu_dereference(counter->data); | 
|  | 2526 | if (!data) | 
|  | 2527 | goto out; | 
|  | 2528 |  | 
|  | 2529 | handle->data	= data; | 
|  | 2530 | handle->counter	= counter; | 
|  | 2531 | handle->nmi	= nmi; | 
|  | 2532 | handle->sample	= sample; | 
|  | 2533 |  | 
|  | 2534 | if (!data->nr_pages) | 
|  | 2535 | goto fail; | 
|  | 2536 |  | 
|  | 2537 | have_lost = atomic_read(&data->lost); | 
|  | 2538 | if (have_lost) | 
|  | 2539 | size += sizeof(lost_event); | 
|  | 2540 |  | 
|  | 2541 | perf_output_lock(handle); | 
|  | 2542 |  | 
|  | 2543 | do { | 
|  | 2544 | offset = head = atomic_long_read(&data->head); | 
|  | 2545 | head += size; | 
|  | 2546 | if (unlikely(!perf_output_space(data, offset, head))) | 
|  | 2547 | goto fail; | 
|  | 2548 | } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); | 
|  | 2549 |  | 
|  | 2550 | handle->offset	= offset; | 
|  | 2551 | handle->head	= head; | 
|  | 2552 |  | 
|  | 2553 | if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT)) | 
|  | 2554 | atomic_set(&data->wakeup, 1); | 
|  | 2555 |  | 
|  | 2556 | if (have_lost) { | 
|  | 2557 | lost_event.header.type = PERF_EVENT_LOST; | 
|  | 2558 | lost_event.header.misc = 0; | 
|  | 2559 | lost_event.header.size = sizeof(lost_event); | 
|  | 2560 | lost_event.id          = counter->id; | 
|  | 2561 | lost_event.lost        = atomic_xchg(&data->lost, 0); | 
|  | 2562 |  | 
|  | 2563 | perf_output_put(handle, lost_event); | 
|  | 2564 | } | 
|  | 2565 |  | 
|  | 2566 | return 0; | 
|  | 2567 |  | 
|  | 2568 | fail: | 
|  | 2569 | atomic_inc(&data->lost); | 
|  | 2570 | perf_output_unlock(handle); | 
|  | 2571 | out: | 
|  | 2572 | rcu_read_unlock(); | 
|  | 2573 |  | 
|  | 2574 | return -ENOSPC; | 
|  | 2575 | } | 
|  | 2576 |  | 
| Peter Zijlstra | 78d613e | 2009-03-30 19:07:11 +0200 | [diff] [blame] | 2577 | static void perf_output_end(struct perf_output_handle *handle) | 
| Peter Zijlstra | b9cacc7 | 2009-03-25 12:30:22 +0100 | [diff] [blame] | 2578 | { | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2579 | struct perf_counter *counter = handle->counter; | 
|  | 2580 | struct perf_mmap_data *data = handle->data; | 
|  | 2581 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 2582 | int wakeup_events = counter->attr.wakeup_events; | 
| Peter Zijlstra | c457810 | 2009-04-02 11:12:01 +0200 | [diff] [blame] | 2583 |  | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 2584 | if (handle->sample && wakeup_events) { | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2585 | int events = atomic_inc_return(&data->events); | 
| Peter Zijlstra | c457810 | 2009-04-02 11:12:01 +0200 | [diff] [blame] | 2586 | if (events >= wakeup_events) { | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2587 | atomic_sub(wakeup_events, &data->events); | 
| Peter Zijlstra | c66de4a | 2009-05-05 17:50:22 +0200 | [diff] [blame] | 2588 | atomic_set(&data->wakeup, 1); | 
| Peter Zijlstra | c457810 | 2009-04-02 11:12:01 +0200 | [diff] [blame] | 2589 | } | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 2590 | } | 
|  | 2591 |  | 
|  | 2592 | perf_output_unlock(handle); | 
| Peter Zijlstra | b9cacc7 | 2009-03-25 12:30:22 +0100 | [diff] [blame] | 2593 | rcu_read_unlock(); | 
|  | 2594 | } | 
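/*
 * Typical output sequence, mirroring the emitters below (sketch):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, counter, size, nmi, sample))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, buf, len);
 *	perf_output_end(&handle);
 *
 * perf_output_begin() returns -ENOSPC (and bumps data->lost) when the
 * record does not fit, in which case nothing may be written.
 */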
|  | 2595 |  | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 2596 | static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) | 
|  | 2597 | { | 
|  | 2598 | /* | 
|  | 2599 | * only top level counters have the pid namespace they were created in | 
|  | 2600 | */ | 
|  | 2601 | if (counter->parent) | 
|  | 2602 | counter = counter->parent; | 
|  | 2603 |  | 
|  | 2604 | return task_tgid_nr_ns(p, counter->ns); | 
|  | 2605 | } | 
|  | 2606 |  | 
|  | 2607 | static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) | 
|  | 2608 | { | 
|  | 2609 | /* | 
|  | 2610 | * only top level counters have the pid namespace they were created in | 
|  | 2611 | */ | 
|  | 2612 | if (counter->parent) | 
|  | 2613 | counter = counter->parent; | 
|  | 2614 |  | 
|  | 2615 | return task_pid_nr_ns(p, counter->ns); | 
|  | 2616 | } | 
|  | 2617 |  | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 2618 | static void perf_counter_output(struct perf_counter *counter, int nmi, | 
|  | 2619 | struct perf_sample_data *data) | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2620 | { | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2621 | int ret; | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 2622 | u64 sample_type = counter->attr.sample_type; | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2623 | struct perf_output_handle handle; | 
|  | 2624 | struct perf_event_header header; | 
|  | 2625 | u64 ip; | 
| Peter Zijlstra | 5c14819 | 2009-03-25 12:30:23 +0100 | [diff] [blame] | 2626 | struct { | 
| Peter Zijlstra | ea5d20c | 2009-03-25 12:30:25 +0100 | [diff] [blame] | 2627 | u32 pid, tid; | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2628 | } tid_entry; | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2629 | struct { | 
| Peter Zijlstra | 8e5799b | 2009-06-02 15:08:15 +0200 | [diff] [blame] | 2630 | u64 id; | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2631 | u64 counter; | 
|  | 2632 | } group_entry; | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 2633 | struct perf_callchain_entry *callchain = NULL; | 
|  | 2634 | int callchain_size = 0; | 
| Peter Zijlstra | 339f7c9 | 2009-04-06 11:45:06 +0200 | [diff] [blame] | 2635 | u64 time; | 
| Peter Zijlstra | f370e1e | 2009-05-08 18:52:24 +0200 | [diff] [blame] | 2636 | struct { | 
|  | 2637 | u32 cpu, reserved; | 
|  | 2638 | } cpu_entry; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2639 |  | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 2640 | header.type = PERF_EVENT_SAMPLE; | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2641 | header.size = sizeof(header); | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2642 |  | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 2643 | header.misc = 0; | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 2644 | header.misc |= perf_misc_flags(data->regs); | 
| Peter Zijlstra | 6fab019 | 2009-04-08 15:01:26 +0200 | [diff] [blame] | 2645 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2646 | if (sample_type & PERF_SAMPLE_IP) { | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 2647 | ip = perf_instruction_pointer(data->regs); | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2648 | header.size += sizeof(ip); | 
|  | 2649 | } | 
| Peter Zijlstra | ea5d20c | 2009-03-25 12:30:25 +0100 | [diff] [blame] | 2650 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2651 | if (sample_type & PERF_SAMPLE_TID) { | 
| Peter Zijlstra | ea5d20c | 2009-03-25 12:30:25 +0100 | [diff] [blame] | 2652 | /* namespace issues */ | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 2653 | tid_entry.pid = perf_counter_pid(counter, current); | 
|  | 2654 | tid_entry.tid = perf_counter_tid(counter, current); | 
| Peter Zijlstra | ea5d20c | 2009-03-25 12:30:25 +0100 | [diff] [blame] | 2655 |  | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2656 | header.size += sizeof(tid_entry); | 
|  | 2657 | } | 
| Peter Zijlstra | ea5d20c | 2009-03-25 12:30:25 +0100 | [diff] [blame] | 2658 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2659 | if (sample_type & PERF_SAMPLE_TIME) { | 
| Peter Zijlstra | 4d85545 | 2009-04-08 15:01:32 +0200 | [diff] [blame] | 2660 | /* | 
|  | 2661 | * Maybe do better on x86 and provide cpu_clock_nmi() | 
|  | 2662 | */ | 
|  | 2663 | time = sched_clock(); | 
|  | 2664 |  | 
| Peter Zijlstra | 4d85545 | 2009-04-08 15:01:32 +0200 | [diff] [blame] | 2665 | header.size += sizeof(u64); | 
|  | 2666 | } | 
|  | 2667 |  | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 2668 | if (sample_type & PERF_SAMPLE_ADDR) | 
| Peter Zijlstra | 78f13e9 | 2009-04-08 15:01:33 +0200 | [diff] [blame] | 2669 | header.size += sizeof(u64); | 
| Peter Zijlstra | 78f13e9 | 2009-04-08 15:01:33 +0200 | [diff] [blame] | 2670 |  | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 2671 | if (sample_type & PERF_SAMPLE_ID) | 
| Peter Zijlstra | a85f61a | 2009-05-08 18:52:23 +0200 | [diff] [blame] | 2672 | header.size += sizeof(u64); | 
| Peter Zijlstra | a85f61a | 2009-05-08 18:52:23 +0200 | [diff] [blame] | 2673 |  | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 2674 | if (sample_type & PERF_SAMPLE_STREAM_ID) | 
|  | 2675 | header.size += sizeof(u64); | 
|  | 2676 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2677 | if (sample_type & PERF_SAMPLE_CPU) { | 
| Peter Zijlstra | f370e1e | 2009-05-08 18:52:24 +0200 | [diff] [blame] | 2678 | header.size += sizeof(cpu_entry); | 
|  | 2679 |  | 
|  | 2680 | cpu_entry.cpu = raw_smp_processor_id(); | 
| Arjan van de Ven | 0dc3d52 | 2009-07-21 00:55:05 -0700 | [diff] [blame] | 2681 | cpu_entry.reserved = 0; | 
| Peter Zijlstra | f370e1e | 2009-05-08 18:52:24 +0200 | [diff] [blame] | 2682 | } | 
|  | 2683 |  | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 2684 | if (sample_type & PERF_SAMPLE_PERIOD) | 
| Peter Zijlstra | 689802b | 2009-06-05 15:05:43 +0200 | [diff] [blame] | 2685 | header.size += sizeof(u64); | 
| Peter Zijlstra | 689802b | 2009-06-05 15:05:43 +0200 | [diff] [blame] | 2686 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2687 | if (sample_type & PERF_SAMPLE_GROUP) { | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2688 | header.size += sizeof(u64) + | 
|  | 2689 | counter->nr_siblings * sizeof(group_entry); | 
|  | 2690 | } | 
|  | 2691 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2692 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 2693 | callchain = perf_callchain(data->regs); | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 2694 |  | 
|  | 2695 | if (callchain) { | 
| Peter Zijlstra | 9c03d88 | 2009-04-06 11:45:00 +0200 | [diff] [blame] | 2696 | callchain_size = (1 + callchain->nr) * sizeof(u64); | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 2697 | header.size += callchain_size; | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 2698 | } else | 
|  | 2699 | header.size += sizeof(u64); | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 2700 | } | 
|  | 2701 |  | 
| Peter Zijlstra | 4c9e254 | 2009-04-06 11:45:09 +0200 | [diff] [blame] | 2702 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2703 | if (ret) | 
|  | 2704 | return; | 
| Peter Zijlstra | ea5d20c | 2009-03-25 12:30:25 +0100 | [diff] [blame] | 2705 |  | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2706 | perf_output_put(&handle, header); | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2707 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2708 | if (sample_type & PERF_SAMPLE_IP) | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2709 | perf_output_put(&handle, ip); | 
|  | 2710 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2711 | if (sample_type & PERF_SAMPLE_TID) | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2712 | perf_output_put(&handle, tid_entry); | 
|  | 2713 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2714 | if (sample_type & PERF_SAMPLE_TIME) | 
| Peter Zijlstra | 4d85545 | 2009-04-08 15:01:32 +0200 | [diff] [blame] | 2715 | perf_output_put(&handle, time); | 
|  | 2716 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2717 | if (sample_type & PERF_SAMPLE_ADDR) | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 2718 | perf_output_put(&handle, data->addr); | 
| Peter Zijlstra | 78f13e9 | 2009-04-08 15:01:33 +0200 | [diff] [blame] | 2719 |  | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 2720 | if (sample_type & PERF_SAMPLE_ID) { | 
|  | 2721 | u64 id = primary_counter_id(counter); | 
|  | 2722 |  | 
|  | 2723 | perf_output_put(&handle, id); | 
|  | 2724 | } | 
|  | 2725 |  | 
|  | 2726 | if (sample_type & PERF_SAMPLE_STREAM_ID) | 
| Peter Zijlstra | ac4bcf8 | 2009-06-05 14:44:52 +0200 | [diff] [blame] | 2727 | perf_output_put(&handle, counter->id); | 
| Peter Zijlstra | a85f61a | 2009-05-08 18:52:23 +0200 | [diff] [blame] | 2728 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2729 | if (sample_type & PERF_SAMPLE_CPU) | 
| Peter Zijlstra | f370e1e | 2009-05-08 18:52:24 +0200 | [diff] [blame] | 2730 | perf_output_put(&handle, cpu_entry); | 
|  | 2731 |  | 
| Peter Zijlstra | 689802b | 2009-06-05 15:05:43 +0200 | [diff] [blame] | 2732 | if (sample_type & PERF_SAMPLE_PERIOD) | 
| Peter Zijlstra | 9e350de | 2009-06-10 21:34:59 +0200 | [diff] [blame] | 2733 | perf_output_put(&handle, data->period); | 
| Peter Zijlstra | 689802b | 2009-06-05 15:05:43 +0200 | [diff] [blame] | 2734 |  | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 2735 | /* | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2736 | * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 2737 | */ | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 2738 | if (sample_type & PERF_SAMPLE_GROUP) { | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2739 | struct perf_counter *leader, *sub; | 
|  | 2740 | u64 nr = counter->nr_siblings; | 
|  | 2741 |  | 
|  | 2742 | perf_output_put(&handle, nr); | 
|  | 2743 |  | 
|  | 2744 | leader = counter->group_leader; | 
|  | 2745 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | 
|  | 2746 | if (sub != counter) | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 2747 | sub->pmu->read(sub); | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2748 |  | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 2749 | group_entry.id = primary_counter_id(sub); | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 2750 | group_entry.counter = atomic64_read(&sub->count); | 
|  | 2751 |  | 
|  | 2752 | perf_output_put(&handle, group_entry); | 
|  | 2753 | } | 
|  | 2754 | } | 
|  | 2755 |  | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 2756 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 
|  | 2757 | if (callchain) | 
|  | 2758 | perf_output_copy(&handle, callchain, callchain_size); | 
|  | 2759 | else { | 
|  | 2760 | u64 nr = 0; | 
|  | 2761 | perf_output_put(&handle, nr); | 
|  | 2762 | } | 
|  | 2763 | } | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 2764 |  | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 2765 | perf_output_end(&handle); | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 2766 | } | 
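/*
 * Resulting PERF_EVENT_SAMPLE body, in the order emitted above (each field
 * present only when the matching PERF_SAMPLE_* bit is set in
 * attr.sample_type):
 *
 *	struct perf_event_header header;
 *	u64 ip;					   PERF_SAMPLE_IP
 *	u32 pid, tid;				   PERF_SAMPLE_TID
 *	u64 time;				   PERF_SAMPLE_TIME
 *	u64 addr;				   PERF_SAMPLE_ADDR
 *	u64 id;					   PERF_SAMPLE_ID
 *	u64 stream_id;				   PERF_SAMPLE_STREAM_ID
 *	u32 cpu, reserved;			   PERF_SAMPLE_CPU
 *	u64 period;				   PERF_SAMPLE_PERIOD
 *	{ u64 nr; { u64 id, counter; } [nr]; }	   PERF_SAMPLE_GROUP
 *	{ u64 nr; u64 ips[nr]; }		   PERF_SAMPLE_CALLCHAIN
 */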
|  | 2767 |  | 
| Peter Zijlstra | 0322cd6 | 2009-03-19 20:26:19 +0100 | [diff] [blame] | 2768 | /* | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 2769 | * read event | 
|  | 2770 | */ | 
|  | 2771 |  | 
|  | 2772 | struct perf_read_event { | 
|  | 2773 | struct perf_event_header	header; | 
|  | 2774 |  | 
|  | 2775 | u32				pid; | 
|  | 2776 | u32				tid; | 
|  | 2777 | u64				value; | 
|  | 2778 | u64				format[3]; | 
|  | 2779 | }; | 
|  | 2780 |  | 
|  | 2781 | static void | 
|  | 2782 | perf_counter_read_event(struct perf_counter *counter, | 
|  | 2783 | struct task_struct *task) | 
|  | 2784 | { | 
|  | 2785 | struct perf_output_handle handle; | 
|  | 2786 | struct perf_read_event event = { | 
|  | 2787 | .header = { | 
|  | 2788 | .type = PERF_EVENT_READ, | 
|  | 2789 | .misc = 0, | 
|  | 2790 | .size = sizeof(event) - sizeof(event.format), | 
|  | 2791 | }, | 
|  | 2792 | .pid = perf_counter_pid(counter, task), | 
|  | 2793 | .tid = perf_counter_tid(counter, task), | 
|  | 2794 | .value = atomic64_read(&counter->count), | 
|  | 2795 | }; | 
|  | 2796 | int ret, i = 0; | 
|  | 2797 |  | 
|  | 2798 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 
|  | 2799 | event.header.size += sizeof(u64); | 
|  | 2800 | event.format[i++] = counter->total_time_enabled; | 
|  | 2801 | } | 
|  | 2802 |  | 
|  | 2803 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 
|  | 2804 | event.header.size += sizeof(u64); | 
|  | 2805 | event.format[i++] = counter->total_time_running; | 
|  | 2806 | } | 
|  | 2807 |  | 
|  | 2808 | if (counter->attr.read_format & PERF_FORMAT_ID) { | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 2809 | event.header.size += sizeof(u64); | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 2810 | event.format[i++] = primary_counter_id(counter); | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 2811 | } | 
|  | 2812 |  | 
|  | 2813 | ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); | 
|  | 2814 | if (ret) | 
|  | 2815 | return; | 
|  | 2816 |  | 
|  | 2817 | perf_output_copy(&handle, &event, event.header.size); | 
|  | 2818 | perf_output_end(&handle); | 
|  | 2819 | } | 
|  | 2820 |  | 
|  | 2821 | /* | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2822 | * fork tracking | 
|  | 2823 | */ | 
|  | 2824 |  | 
|  | 2825 | struct perf_fork_event { | 
|  | 2826 | struct task_struct	*task; | 
|  | 2827 |  | 
|  | 2828 | struct { | 
|  | 2829 | struct perf_event_header	header; | 
|  | 2830 |  | 
|  | 2831 | u32				pid; | 
|  | 2832 | u32				ppid; | 
|  | 2833 | } event; | 
|  | 2834 | }; | 
|  | 2835 |  | 
|  | 2836 | static void perf_counter_fork_output(struct perf_counter *counter, | 
|  | 2837 | struct perf_fork_event *fork_event) | 
|  | 2838 | { | 
|  | 2839 | struct perf_output_handle handle; | 
|  | 2840 | int size = fork_event->event.header.size; | 
|  | 2841 | struct task_struct *task = fork_event->task; | 
|  | 2842 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | 
|  | 2843 |  | 
|  | 2844 | if (ret) | 
|  | 2845 | return; | 
|  | 2846 |  | 
|  | 2847 | fork_event->event.pid = perf_counter_pid(counter, task); | 
|  | 2848 | fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); | 
|  | 2849 |  | 
|  | 2850 | perf_output_put(&handle, fork_event->event); | 
|  | 2851 | perf_output_end(&handle); | 
|  | 2852 | } | 
|  | 2853 |  | 
|  | 2854 | static int perf_counter_fork_match(struct perf_counter *counter) | 
|  | 2855 | { | 
| Peter Zijlstra | d99e944 | 2009-06-04 17:08:58 +0200 | [diff] [blame] | 2856 | if (counter->attr.comm || counter->attr.mmap) | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2857 | return 1; | 
|  | 2858 |  | 
|  | 2859 | return 0; | 
|  | 2860 | } | 
|  | 2861 |  | 
|  | 2862 | static void perf_counter_fork_ctx(struct perf_counter_context *ctx, | 
|  | 2863 | struct perf_fork_event *fork_event) | 
|  | 2864 | { | 
|  | 2865 | struct perf_counter *counter; | 
|  | 2866 |  | 
|  | 2867 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 
|  | 2868 | return; | 
|  | 2869 |  | 
|  | 2870 | rcu_read_lock(); | 
|  | 2871 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | 
|  | 2872 | if (perf_counter_fork_match(counter)) | 
|  | 2873 | perf_counter_fork_output(counter, fork_event); | 
|  | 2874 | } | 
|  | 2875 | rcu_read_unlock(); | 
|  | 2876 | } | 
|  | 2877 |  | 
|  | 2878 | static void perf_counter_fork_event(struct perf_fork_event *fork_event) | 
|  | 2879 | { | 
|  | 2880 | struct perf_cpu_context *cpuctx; | 
|  | 2881 | struct perf_counter_context *ctx; | 
|  | 2882 |  | 
|  | 2883 | cpuctx = &get_cpu_var(perf_cpu_context); | 
|  | 2884 | perf_counter_fork_ctx(&cpuctx->ctx, fork_event); | 
|  | 2885 | put_cpu_var(perf_cpu_context); | 
|  | 2886 |  | 
|  | 2887 | rcu_read_lock(); | 
|  | 2888 | /* | 
|  | 2889 | * doesn't really matter which of the child contexts the | 
|  | 2890 | * event ends up in. | 
|  | 2891 | */ | 
|  | 2892 | ctx = rcu_dereference(current->perf_counter_ctxp); | 
|  | 2893 | if (ctx) | 
|  | 2894 | perf_counter_fork_ctx(ctx, fork_event); | 
|  | 2895 | rcu_read_unlock(); | 
|  | 2896 | } | 
|  | 2897 |  | 
|  | 2898 | void perf_counter_fork(struct task_struct *task) | 
|  | 2899 | { | 
|  | 2900 | struct perf_fork_event fork_event; | 
|  | 2901 |  | 
|  | 2902 | if (!atomic_read(&nr_comm_counters) && | 
| Peter Zijlstra | d99e944 | 2009-06-04 17:08:58 +0200 | [diff] [blame] | 2903 | !atomic_read(&nr_mmap_counters)) | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2904 | return; | 
|  | 2905 |  | 
|  | 2906 | fork_event = (struct perf_fork_event){ | 
|  | 2907 | .task	= task, | 
|  | 2908 | .event  = { | 
|  | 2909 | .header = { | 
|  | 2910 | .type = PERF_EVENT_FORK, | 
| Peter Zijlstra | 573402d | 2009-07-22 11:13:50 +0200 | [diff] [blame] | 2911 | .misc = 0, | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2912 | .size = sizeof(fork_event.event), | 
|  | 2913 | }, | 
| Peter Zijlstra | 573402d | 2009-07-22 11:13:50 +0200 | [diff] [blame] | 2914 | /* .pid  */ | 
|  | 2915 | /* .ppid */ | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2916 | }, | 
|  | 2917 | }; | 
|  | 2918 |  | 
|  | 2919 | perf_counter_fork_event(&fork_event); | 
|  | 2920 | } | 
|  | 2921 |  | 
|  | 2922 | /* | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2923 | * comm tracking | 
|  | 2924 | */ | 
|  | 2925 |  | 
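|  |  | /* | 
|  |  | * A PERF_EVENT_COMM record is { header, pid, tid } followed by the new | 
|  |  | * task name, NUL padded up to a multiple of u64 so that the next record | 
|  |  | * in the buffer stays 8-byte aligned. | 
|  |  | */ | 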
|  | 2926 | struct perf_comm_event { | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 2927 | struct task_struct	*task; | 
|  | 2928 | char			*comm; | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2929 | int			comm_size; | 
|  | 2930 |  | 
|  | 2931 | struct { | 
|  | 2932 | struct perf_event_header	header; | 
|  | 2933 |  | 
|  | 2934 | u32				pid; | 
|  | 2935 | u32				tid; | 
|  | 2936 | } event; | 
|  | 2937 | }; | 
|  | 2938 |  | 
|  | 2939 | static void perf_counter_comm_output(struct perf_counter *counter, | 
|  | 2940 | struct perf_comm_event *comm_event) | 
|  | 2941 | { | 
|  | 2942 | struct perf_output_handle handle; | 
|  | 2943 | int size = comm_event->event.header.size; | 
|  | 2944 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | 
|  | 2945 |  | 
|  | 2946 | if (ret) | 
|  | 2947 | return; | 
|  | 2948 |  | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 2949 | comm_event->event.pid = perf_counter_pid(counter, comm_event->task); | 
|  | 2950 | comm_event->event.tid = perf_counter_tid(counter, comm_event->task); | 
|  | 2951 |  | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2952 | perf_output_put(&handle, comm_event->event); | 
|  | 2953 | perf_output_copy(&handle, comm_event->comm, | 
|  | 2954 | comm_event->comm_size); | 
|  | 2955 | perf_output_end(&handle); | 
|  | 2956 | } | 
|  | 2957 |  | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2958 | static int perf_counter_comm_match(struct perf_counter *counter) | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2959 | { | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2960 | if (counter->attr.comm) | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2961 | return 1; | 
|  | 2962 |  | 
|  | 2963 | return 0; | 
|  | 2964 | } | 
|  | 2965 |  | 
|  | 2966 | static void perf_counter_comm_ctx(struct perf_counter_context *ctx, | 
|  | 2967 | struct perf_comm_event *comm_event) | 
|  | 2968 | { | 
|  | 2969 | struct perf_counter *counter; | 
|  | 2970 |  | 
|  | 2971 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 
|  | 2972 | return; | 
|  | 2973 |  | 
|  | 2974 | rcu_read_lock(); | 
|  | 2975 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 2976 | if (perf_counter_comm_match(counter)) | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2977 | perf_counter_comm_output(counter, comm_event); | 
|  | 2978 | } | 
|  | 2979 | rcu_read_unlock(); | 
|  | 2980 | } | 
|  | 2981 |  | 
|  | 2982 | static void perf_counter_comm_event(struct perf_comm_event *comm_event) | 
|  | 2983 | { | 
|  | 2984 | struct perf_cpu_context *cpuctx; | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 2985 | struct perf_counter_context *ctx; | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2986 | unsigned int size; | 
| Anton Blanchard | 413ee3b | 2009-07-16 15:15:52 +0200 | [diff] [blame] | 2987 | char comm[TASK_COMM_LEN]; | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2988 |  | 
| Anton Blanchard | 413ee3b | 2009-07-16 15:15:52 +0200 | [diff] [blame] | 2989 | memset(comm, 0, sizeof(comm)); | 
|  | 2990 | strncpy(comm, comm_event->task->comm, sizeof(comm)); | 
| Ingo Molnar | 888fcee | 2009-04-09 09:48:22 +0200 | [diff] [blame] | 2991 | size = ALIGN(strlen(comm)+1, sizeof(u64)); | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 2992 |  | 
|  | 2993 | comm_event->comm = comm; | 
|  | 2994 | comm_event->comm_size = size; | 
|  | 2995 |  | 
|  | 2996 | comm_event->event.header.size = sizeof(comm_event->event) + size; | 
|  | 2997 |  | 
|  | 2998 | cpuctx = &get_cpu_var(perf_cpu_context); | 
|  | 2999 | perf_counter_comm_ctx(&cpuctx->ctx, comm_event); | 
|  | 3000 | put_cpu_var(perf_cpu_context); | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 3001 |  | 
|  | 3002 | rcu_read_lock(); | 
|  | 3003 | /* | 
|  | 3004 | * doesn't really matter which of the child contexts the | 
|  | 3005 | * event ends up in. | 
|  | 3006 | */ | 
|  | 3007 | ctx = rcu_dereference(current->perf_counter_ctxp); | 
|  | 3008 | if (ctx) | 
|  | 3009 | perf_counter_comm_ctx(ctx, comm_event); | 
|  | 3010 | rcu_read_unlock(); | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 3011 | } | 
|  | 3012 |  | 
|  | 3013 | void perf_counter_comm(struct task_struct *task) | 
|  | 3014 | { | 
| Peter Zijlstra | 9ee318a | 2009-04-09 10:53:44 +0200 | [diff] [blame] | 3015 | struct perf_comm_event comm_event; | 
|  | 3016 |  | 
| Paul Mackerras | 57e7986 | 2009-06-30 16:07:19 +1000 | [diff] [blame] | 3017 | if (task->perf_counter_ctxp) | 
|  | 3018 | perf_counter_enable_on_exec(task); | 
|  | 3019 |  | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 3020 | if (!atomic_read(&nr_comm_counters)) | 
| Peter Zijlstra | 9ee318a | 2009-04-09 10:53:44 +0200 | [diff] [blame] | 3021 | return; | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 3022 |  | 
| Peter Zijlstra | 9ee318a | 2009-04-09 10:53:44 +0200 | [diff] [blame] | 3023 | comm_event = (struct perf_comm_event){ | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 3024 | .task	= task, | 
| Peter Zijlstra | 573402d | 2009-07-22 11:13:50 +0200 | [diff] [blame] | 3025 | /* .comm      */ | 
|  | 3026 | /* .comm_size */ | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 3027 | .event  = { | 
| Peter Zijlstra | 573402d | 2009-07-22 11:13:50 +0200 | [diff] [blame] | 3028 | .header = { | 
|  | 3029 | .type = PERF_EVENT_COMM, | 
|  | 3030 | .misc = 0, | 
|  | 3031 | /* .size */ | 
|  | 3032 | }, | 
|  | 3033 | /* .pid */ | 
|  | 3034 | /* .tid */ | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 3035 | }, | 
|  | 3036 | }; | 
|  | 3037 |  | 
|  | 3038 | perf_counter_comm_event(&comm_event); | 
|  | 3039 | } | 
|  | 3040 |  | 
|  | 3041 | /* | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3042 | * mmap tracking | 
|  | 3043 | */ | 
|  | 3044 |  | 
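|  |  | /* | 
|  |  | * A PERF_EVENT_MMAP record describes one new executable mapping: | 
|  |  | * { header, pid, tid, start, len, pgoff } followed by the file name | 
|  |  | * (or "[vdso]", "//anon", ...), again padded to a u64 boundary. | 
|  |  | */ | 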
|  | 3045 | struct perf_mmap_event { | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3046 | struct vm_area_struct	*vma; | 
|  | 3047 |  | 
|  | 3048 | const char		*file_name; | 
|  | 3049 | int			file_size; | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3050 |  | 
|  | 3051 | struct { | 
|  | 3052 | struct perf_event_header	header; | 
|  | 3053 |  | 
|  | 3054 | u32				pid; | 
|  | 3055 | u32				tid; | 
|  | 3056 | u64				start; | 
|  | 3057 | u64				len; | 
|  | 3058 | u64				pgoff; | 
|  | 3059 | } event; | 
|  | 3060 | }; | 
|  | 3061 |  | 
|  | 3062 | static void perf_counter_mmap_output(struct perf_counter *counter, | 
|  | 3063 | struct perf_mmap_event *mmap_event) | 
|  | 3064 | { | 
|  | 3065 | struct perf_output_handle handle; | 
|  | 3066 | int size = mmap_event->event.header.size; | 
| Peter Zijlstra | 4c9e254 | 2009-04-06 11:45:09 +0200 | [diff] [blame] | 3067 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3068 |  | 
|  | 3069 | if (ret) | 
|  | 3070 | return; | 
|  | 3071 |  | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 3072 | mmap_event->event.pid = perf_counter_pid(counter, current); | 
|  | 3073 | mmap_event->event.tid = perf_counter_tid(counter, current); | 
|  | 3074 |  | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3075 | perf_output_put(&handle, mmap_event->event); | 
|  | 3076 | perf_output_copy(&handle, mmap_event->file_name, | 
|  | 3077 | mmap_event->file_size); | 
| Peter Zijlstra | 78d613e | 2009-03-30 19:07:11 +0200 | [diff] [blame] | 3078 | perf_output_end(&handle); | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3079 | } | 
|  | 3080 |  | 
|  | 3081 | static int perf_counter_mmap_match(struct perf_counter *counter, | 
|  | 3082 | struct perf_mmap_event *mmap_event) | 
|  | 3083 | { | 
| Peter Zijlstra | d99e944 | 2009-06-04 17:08:58 +0200 | [diff] [blame] | 3084 | if (counter->attr.mmap) | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3085 | return 1; | 
|  | 3086 |  | 
|  | 3087 | return 0; | 
|  | 3088 | } | 
|  | 3089 |  | 
|  | 3090 | static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, | 
|  | 3091 | struct perf_mmap_event *mmap_event) | 
|  | 3092 | { | 
|  | 3093 | struct perf_counter *counter; | 
|  | 3094 |  | 
|  | 3095 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 
|  | 3096 | return; | 
|  | 3097 |  | 
|  | 3098 | rcu_read_lock(); | 
|  | 3099 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | 
|  | 3100 | if (perf_counter_mmap_match(counter, mmap_event)) | 
|  | 3101 | perf_counter_mmap_output(counter, mmap_event); | 
|  | 3102 | } | 
|  | 3103 | rcu_read_unlock(); | 
|  | 3104 | } | 
|  | 3105 |  | 
|  | 3106 | static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) | 
|  | 3107 | { | 
|  | 3108 | struct perf_cpu_context *cpuctx; | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 3109 | struct perf_counter_context *ctx; | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3110 | struct vm_area_struct *vma = mmap_event->vma; | 
|  | 3111 | struct file *file = vma->vm_file; | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3112 | unsigned int size; | 
|  | 3113 | char tmp[16]; | 
|  | 3114 | char *buf = NULL; | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3115 | const char *name; | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3116 |  | 
| Anton Blanchard | 413ee3b | 2009-07-16 15:15:52 +0200 | [diff] [blame] | 3117 | memset(tmp, 0, sizeof(tmp)); | 
|  | 3118 |  | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3119 | if (file) { | 
| Anton Blanchard | 413ee3b | 2009-07-16 15:15:52 +0200 | [diff] [blame] | 3120 | /* | 
|  | 3121 | * d_path works from the end of the buffer backwards, so we | 
|  | 3122 | * need to add enough zero bytes after the string to handle | 
|  | 3123 | * the 64bit alignment we do later. | 
|  | 3124 | */ | 
|  | 3125 | buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3126 | if (!buf) { | 
|  | 3127 | name = strncpy(tmp, "//enomem", sizeof(tmp)); | 
|  | 3128 | goto got_name; | 
|  | 3129 | } | 
| Peter Zijlstra | d3d21c4 | 2009-04-09 10:53:46 +0200 | [diff] [blame] | 3130 | name = d_path(&file->f_path, buf, PATH_MAX); | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3131 | if (IS_ERR(name)) { | 
|  | 3132 | name = strncpy(tmp, "//toolong", sizeof(tmp)); | 
|  | 3133 | goto got_name; | 
|  | 3134 | } | 
|  | 3135 | } else { | 
| Anton Blanchard | 413ee3b | 2009-07-16 15:15:52 +0200 | [diff] [blame] | 3136 | if (arch_vma_name(mmap_event->vma)) { | 
|  | 3137 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), | 
|  | 3138 | sizeof(tmp)); | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3139 | goto got_name; | 
| Anton Blanchard | 413ee3b | 2009-07-16 15:15:52 +0200 | [diff] [blame] | 3140 | } | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3141 |  | 
|  | 3142 | if (!vma->vm_mm) { | 
|  | 3143 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); | 
|  | 3144 | goto got_name; | 
|  | 3145 | } | 
|  | 3146 |  | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3147 | name = strncpy(tmp, "//anon", sizeof(tmp)); | 
|  | 3148 | goto got_name; | 
|  | 3149 | } | 
|  | 3150 |  | 
|  | 3151 | got_name: | 
| Ingo Molnar | 888fcee | 2009-04-09 09:48:22 +0200 | [diff] [blame] | 3152 | size = ALIGN(strlen(name)+1, sizeof(u64)); | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3153 |  | 
|  | 3154 | mmap_event->file_name = name; | 
|  | 3155 | mmap_event->file_size = size; | 
|  | 3156 |  | 
|  | 3157 | mmap_event->event.header.size = sizeof(mmap_event->event) + size; | 
|  | 3158 |  | 
|  | 3159 | cpuctx = &get_cpu_var(perf_cpu_context); | 
|  | 3160 | perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); | 
|  | 3161 | put_cpu_var(perf_cpu_context); | 
|  | 3162 |  | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 3163 | rcu_read_lock(); | 
|  | 3164 | /* | 
|  | 3165 | * doesn't really matter which of the child contexts the | 
|  | 3166 | * event ends up in. | 
|  | 3167 | */ | 
|  | 3168 | ctx = rcu_dereference(current->perf_counter_ctxp); | 
|  | 3169 | if (ctx) | 
|  | 3170 | perf_counter_mmap_ctx(ctx, mmap_event); | 
|  | 3171 | rcu_read_unlock(); | 
|  | 3172 |  | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3173 | kfree(buf); | 
|  | 3174 | } | 
|  | 3175 |  | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3176 | void __perf_counter_mmap(struct vm_area_struct *vma) | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3177 | { | 
| Peter Zijlstra | 9ee318a | 2009-04-09 10:53:44 +0200 | [diff] [blame] | 3178 | struct perf_mmap_event mmap_event; | 
|  | 3179 |  | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 3180 | if (!atomic_read(&nr_mmap_counters)) | 
| Peter Zijlstra | 9ee318a | 2009-04-09 10:53:44 +0200 | [diff] [blame] | 3181 | return; | 
|  | 3182 |  | 
|  | 3183 | mmap_event = (struct perf_mmap_event){ | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3184 | .vma	= vma, | 
| Peter Zijlstra | 573402d | 2009-07-22 11:13:50 +0200 | [diff] [blame] | 3185 | /* .file_name */ | 
|  | 3186 | /* .file_size */ | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3187 | .event  = { | 
| Peter Zijlstra | 573402d | 2009-07-22 11:13:50 +0200 | [diff] [blame] | 3188 | .header = { | 
|  | 3189 | .type = PERF_EVENT_MMAP, | 
|  | 3190 | .misc = 0, | 
|  | 3191 | /* .size */ | 
|  | 3192 | }, | 
|  | 3193 | /* .pid */ | 
|  | 3194 | /* .tid */ | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 3195 | .start  = vma->vm_start, | 
|  | 3196 | .len    = vma->vm_end - vma->vm_start, | 
|  | 3197 | .pgoff  = vma->vm_pgoff, | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3198 | }, | 
|  | 3199 | }; | 
|  | 3200 |  | 
|  | 3201 | perf_counter_mmap_event(&mmap_event); | 
|  | 3202 | } | 
|  | 3203 |  | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 3204 | /* | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3205 | * IRQ throttle logging | 
|  | 3206 | */ | 
|  | 3207 |  | 
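|  |  | /* | 
|  |  | * When a counter starts interrupting faster than the sysctl'ed sample | 
|  |  | * rate allows it gets throttled; a PERF_EVENT_THROTTLE/UNTHROTTLE | 
|  |  | * record with a timestamp and the counter ids marks the transition in | 
|  |  | * the event stream. | 
|  |  | */ | 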
|  | 3208 | static void perf_log_throttle(struct perf_counter *counter, int enable) | 
|  | 3209 | { | 
|  | 3210 | struct perf_output_handle handle; | 
|  | 3211 | int ret; | 
|  | 3212 |  | 
|  | 3213 | struct { | 
|  | 3214 | struct perf_event_header	header; | 
|  | 3215 | u64				time; | 
| Peter Zijlstra | cca3f45 | 2009-06-11 14:57:55 +0200 | [diff] [blame] | 3216 | u64				id; | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 3217 | u64				stream_id; | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3218 | } throttle_event = { | 
|  | 3219 | .header = { | 
| Anton Blanchard | 966ee4d | 2009-07-22 23:05:46 +1000 | [diff] [blame] | 3220 | .type = PERF_EVENT_THROTTLE, | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3221 | .misc = 0, | 
|  | 3222 | .size = sizeof(throttle_event), | 
|  | 3223 | }, | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 3224 | .time		= sched_clock(), | 
|  | 3225 | .id		= primary_counter_id(counter), | 
|  | 3226 | .stream_id	= counter->id, | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3227 | }; | 
|  | 3228 |  | 
| Anton Blanchard | 966ee4d | 2009-07-22 23:05:46 +1000 | [diff] [blame] | 3229 | if (enable) | 
|  | 3230 | throttle_event.header.type = PERF_EVENT_UNTHROTTLE; | 
|  | 3231 |  | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 3232 | ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3233 | if (ret) | 
|  | 3234 | return; | 
|  | 3235 |  | 
|  | 3236 | perf_output_put(&handle, throttle_event); | 
|  | 3237 | perf_output_end(&handle); | 
|  | 3238 | } | 
|  | 3239 |  | 
|  | 3240 | /* | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 3241 | * Generic counter overflow handling, sampling. | 
| Peter Zijlstra | f6c7d5f | 2009-04-06 11:45:04 +0200 | [diff] [blame] | 3242 | */ | 
|  | 3243 |  | 
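|  |  | /* | 
|  |  | * perf_counter_overflow() is called for every sample: it throttles | 
|  |  | * counters that interrupt faster than sysctl_perf_counter_sample_rate, | 
|  |  | * re-adjusts the period of freq based counters, handles event_limit | 
|  |  | * (signal POLL_HUP and disable once the limit is reached) and finally | 
|  |  | * emits the sample via perf_counter_output().  A non-zero return value | 
|  |  | * asks the caller to stop the counter. | 
|  |  | */ | 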
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3244 | int perf_counter_overflow(struct perf_counter *counter, int nmi, | 
|  | 3245 | struct perf_sample_data *data) | 
| Peter Zijlstra | f6c7d5f | 2009-04-06 11:45:04 +0200 | [diff] [blame] | 3246 | { | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 3247 | int events = atomic_read(&counter->event_limit); | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3248 | int throttle = counter->pmu->unthrottle != NULL; | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 3249 | struct hw_perf_counter *hwc = &counter->hw; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 3250 | int ret = 0; | 
|  | 3251 |  | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3252 | if (!throttle) { | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 3253 | hwc->interrupts++; | 
| Ingo Molnar | 128f048 | 2009-06-03 22:19:36 +0200 | [diff] [blame] | 3254 | } else { | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 3255 | if (hwc->interrupts != MAX_INTERRUPTS) { | 
|  | 3256 | hwc->interrupts++; | 
| Peter Zijlstra | df58ab2 | 2009-06-11 11:25:05 +0200 | [diff] [blame] | 3257 | if (HZ * hwc->interrupts > | 
|  | 3258 | (u64)sysctl_perf_counter_sample_rate) { | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 3259 | hwc->interrupts = MAX_INTERRUPTS; | 
| Ingo Molnar | 128f048 | 2009-06-03 22:19:36 +0200 | [diff] [blame] | 3260 | perf_log_throttle(counter, 0); | 
|  | 3261 | ret = 1; | 
|  | 3262 | } | 
|  | 3263 | } else { | 
|  | 3264 | /* | 
|  | 3265 | * Keep re-disabling the counter even though on the previous | 
|  | 3266 | * pass we disabled it - just in case we raced with a | 
|  | 3267 | * sched-in and the counter got enabled again: | 
|  | 3268 | */ | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 3269 | ret = 1; | 
|  | 3270 | } | 
|  | 3271 | } | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3272 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 3273 | if (counter->attr.freq) { | 
|  | 3274 | u64 now = sched_clock(); | 
|  | 3275 | s64 delta = now - hwc->freq_stamp; | 
|  | 3276 |  | 
|  | 3277 | hwc->freq_stamp = now; | 
|  | 3278 |  | 
|  | 3279 | if (delta > 0 && delta < TICK_NSEC) | 
|  | 3280 | perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); | 
|  | 3281 | } | 
|  | 3282 |  | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 3283 | /* | 
|  | 3284 | * XXX event_limit might not quite work as expected on inherited | 
|  | 3285 | * counters | 
|  | 3286 | */ | 
|  | 3287 |  | 
| Peter Zijlstra | 4c9e254 | 2009-04-06 11:45:09 +0200 | [diff] [blame] | 3288 | counter->pending_kill = POLL_IN; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 3289 | if (events && atomic_dec_and_test(&counter->event_limit)) { | 
|  | 3290 | ret = 1; | 
| Peter Zijlstra | 4c9e254 | 2009-04-06 11:45:09 +0200 | [diff] [blame] | 3291 | counter->pending_kill = POLL_HUP; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 3292 | if (nmi) { | 
|  | 3293 | counter->pending_disable = 1; | 
|  | 3294 | perf_pending_queue(&counter->pending, | 
|  | 3295 | perf_pending_counter); | 
|  | 3296 | } else | 
|  | 3297 | perf_counter_disable(counter); | 
|  | 3298 | } | 
|  | 3299 |  | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3300 | perf_counter_output(counter, nmi, data); | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 3301 | return ret; | 
| Peter Zijlstra | f6c7d5f | 2009-04-06 11:45:04 +0200 | [diff] [blame] | 3302 | } | 
|  | 3303 |  | 
|  | 3304 | /* | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3305 | * Generic software counter infrastructure | 
|  | 3306 | */ | 
|  | 3307 |  | 
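|  |  | /* | 
|  |  | * Software counters count into hw.count; hw.prev_count remembers the | 
|  |  | * value last folded into counter->count and hw.period_left tracks the | 
|  |  | * distance to the next sample.  set_period() biases hw.count to | 
|  |  | * -period, so an overflow is simply hw.count turning non-negative. | 
|  |  | */ | 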
|  | 3308 | static void perf_swcounter_update(struct perf_counter *counter) | 
|  | 3309 | { | 
|  | 3310 | struct hw_perf_counter *hwc = &counter->hw; | 
|  | 3311 | u64 prev, now; | 
|  | 3312 | s64 delta; | 
|  | 3313 |  | 
|  | 3314 | again: | 
|  | 3315 | prev = atomic64_read(&hwc->prev_count); | 
|  | 3316 | now = atomic64_read(&hwc->count); | 
|  | 3317 | if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev) | 
|  | 3318 | goto again; | 
|  | 3319 |  | 
|  | 3320 | delta = now - prev; | 
|  | 3321 |  | 
|  | 3322 | atomic64_add(delta, &counter->count); | 
|  | 3323 | atomic64_sub(delta, &hwc->period_left); | 
|  | 3324 | } | 
|  | 3325 |  | 
|  | 3326 | static void perf_swcounter_set_period(struct perf_counter *counter) | 
|  | 3327 | { | 
|  | 3328 | struct hw_perf_counter *hwc = &counter->hw; | 
|  | 3329 | s64 left = atomic64_read(&hwc->period_left); | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 3330 | s64 period = hwc->sample_period; | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3331 |  | 
|  | 3332 | if (unlikely(left <= -period)) { | 
|  | 3333 | left = period; | 
|  | 3334 | atomic64_set(&hwc->period_left, left); | 
| Peter Zijlstra | 9e350de | 2009-06-10 21:34:59 +0200 | [diff] [blame] | 3335 | hwc->last_period = period; | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3336 | } | 
|  | 3337 |  | 
|  | 3338 | if (unlikely(left <= 0)) { | 
|  | 3339 | left += period; | 
|  | 3340 | atomic64_add(period, &hwc->period_left); | 
| Peter Zijlstra | 9e350de | 2009-06-10 21:34:59 +0200 | [diff] [blame] | 3341 | hwc->last_period = period; | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3342 | } | 
|  | 3343 |  | 
|  | 3344 | atomic64_set(&hwc->prev_count, -left); | 
|  | 3345 | atomic64_set(&hwc->count, -left); | 
|  | 3346 | } | 
|  | 3347 |  | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3348 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | 
|  | 3349 | { | 
| Peter Zijlstra | f6c7d5f | 2009-04-06 11:45:04 +0200 | [diff] [blame] | 3350 | enum hrtimer_restart ret = HRTIMER_RESTART; | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3351 | struct perf_sample_data data; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3352 | struct perf_counter *counter; | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3353 | u64 period; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3354 |  | 
|  | 3355 | counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer); | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3356 | counter->pmu->read(counter); | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3357 |  | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3358 | data.addr = 0; | 
|  | 3359 | data.regs = get_irq_regs(); | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3360 | /* | 
|  | 3361 | * In case we exclude kernel IPs or are somehow not in interrupt | 
|  | 3362 | * context, provide the next best thing, the user IP. | 
|  | 3363 | */ | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3364 | if ((counter->attr.exclude_kernel || !data.regs) && | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3365 | !counter->attr.exclude_user) | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3366 | data.regs = task_pt_regs(current); | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3367 |  | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3368 | if (data.regs) { | 
|  | 3369 | if (perf_counter_overflow(counter, 0, &data)) | 
| Peter Zijlstra | f6c7d5f | 2009-04-06 11:45:04 +0200 | [diff] [blame] | 3370 | ret = HRTIMER_NORESTART; | 
|  | 3371 | } | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3372 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 3373 | period = max_t(u64, 10000, counter->hw.sample_period); | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3374 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3375 |  | 
| Peter Zijlstra | f6c7d5f | 2009-04-06 11:45:04 +0200 | [diff] [blame] | 3376 | return ret; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3377 | } | 
|  | 3378 |  | 
|  | 3379 | static void perf_swcounter_overflow(struct perf_counter *counter, | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3380 | int nmi, struct perf_sample_data *data) | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3381 | { | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3382 | data->period = counter->hw.last_period; | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 3383 |  | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3384 | perf_swcounter_update(counter); | 
|  | 3385 | perf_swcounter_set_period(counter); | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3386 | if (perf_counter_overflow(counter, nmi, data)) | 
| Peter Zijlstra | f6c7d5f | 2009-04-06 11:45:04 +0200 | [diff] [blame] | 3387 | /* soft-disable the counter */ | 
|  | 3388 | ; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3389 | } | 
|  | 3390 |  | 
| Paul Mackerras | 880ca15 | 2009-06-01 17:49:14 +1000 | [diff] [blame] | 3391 | static int perf_swcounter_is_counting(struct perf_counter *counter) | 
|  | 3392 | { | 
|  | 3393 | struct perf_counter_context *ctx; | 
|  | 3394 | unsigned long flags; | 
|  | 3395 | int count; | 
|  | 3396 |  | 
|  | 3397 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | 
|  | 3398 | return 1; | 
|  | 3399 |  | 
|  | 3400 | if (counter->state != PERF_COUNTER_STATE_INACTIVE) | 
|  | 3401 | return 0; | 
|  | 3402 |  | 
|  | 3403 | /* | 
|  | 3404 | * If the counter is inactive, it could be just because | 
|  | 3405 | * its task is scheduled out, or because it's in a group | 
|  | 3406 | * which could not go on the PMU.  We want to count in | 
|  | 3407 | * the first case but not the second.  If the context is | 
|  | 3408 | * currently active then an inactive software counter must | 
|  | 3409 | * be the second case.  If it's not currently active then | 
|  | 3410 | * we need to know whether the counter was active when the | 
|  | 3411 | * context was last active, which we can determine by | 
|  | 3412 | * comparing counter->tstamp_stopped with ctx->time. | 
|  | 3413 | * | 
|  | 3414 | * We are within an RCU read-side critical section, | 
|  | 3415 | * which protects the existence of *ctx. | 
|  | 3416 | */ | 
|  | 3417 | ctx = counter->ctx; | 
|  | 3418 | spin_lock_irqsave(&ctx->lock, flags); | 
|  | 3419 | count = 1; | 
|  | 3420 | /* Re-check state now we have the lock */ | 
|  | 3421 | if (counter->state < PERF_COUNTER_STATE_INACTIVE || | 
|  | 3422 | counter->ctx->is_active || | 
|  | 3423 | counter->tstamp_stopped < ctx->time) | 
|  | 3424 | count = 0; | 
|  | 3425 | spin_unlock_irqrestore(&ctx->lock, flags); | 
|  | 3426 | return count; | 
|  | 3427 | } | 
|  | 3428 |  | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3429 | static int perf_swcounter_match(struct perf_counter *counter, | 
| Peter Zijlstra | 1c432d8 | 2009-06-11 13:19:29 +0200 | [diff] [blame] | 3430 | enum perf_type_id type, | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3431 | u32 event, struct pt_regs *regs) | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3432 | { | 
| Paul Mackerras | 880ca15 | 2009-06-01 17:49:14 +1000 | [diff] [blame] | 3433 | if (!perf_swcounter_is_counting(counter)) | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3434 | return 0; | 
|  | 3435 |  | 
| Ingo Molnar | a21ca2c | 2009-06-06 09:58:57 +0200 | [diff] [blame] | 3436 | if (counter->attr.type != type) | 
|  | 3437 | return 0; | 
|  | 3438 | if (counter->attr.config != event) | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3439 | return 0; | 
|  | 3440 |  | 
| Paul Mackerras | 3f731ca | 2009-06-01 17:52:30 +1000 | [diff] [blame] | 3441 | if (regs) { | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3442 | if (counter->attr.exclude_user && user_mode(regs)) | 
| Paul Mackerras | 3f731ca | 2009-06-01 17:52:30 +1000 | [diff] [blame] | 3443 | return 0; | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3444 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3445 | if (counter->attr.exclude_kernel && !user_mode(regs)) | 
| Paul Mackerras | 3f731ca | 2009-06-01 17:52:30 +1000 | [diff] [blame] | 3446 | return 0; | 
|  | 3447 | } | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3448 |  | 
|  | 3449 | return 1; | 
|  | 3450 | } | 
|  | 3451 |  | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3452 | static void perf_swcounter_add(struct perf_counter *counter, u64 nr, | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3453 | int nmi, struct perf_sample_data *data) | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3454 | { | 
|  | 3455 | int neg = atomic64_add_negative(nr, &counter->hw.count); | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 3456 |  | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3457 | if (counter->hw.sample_period && !neg && data->regs) | 
|  | 3458 | perf_swcounter_overflow(counter, nmi, data); | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3459 | } | 
|  | 3460 |  | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3461 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3462 | enum perf_type_id type, | 
|  | 3463 | u32 event, u64 nr, int nmi, | 
|  | 3464 | struct perf_sample_data *data) | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3465 | { | 
|  | 3466 | struct perf_counter *counter; | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3467 |  | 
| Peter Zijlstra | 01ef09d | 2009-03-19 20:26:11 +0100 | [diff] [blame] | 3468 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3469 | return; | 
|  | 3470 |  | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 3471 | rcu_read_lock(); | 
|  | 3472 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3473 | if (perf_swcounter_match(counter, type, event, data->regs)) | 
|  | 3474 | perf_swcounter_add(counter, nr, nmi, data); | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3475 | } | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 3476 | rcu_read_unlock(); | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3477 | } | 
|  | 3478 |  | 
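|  |  | /* | 
|  |  | * Per-cpu recursion counts, one per context (task, softirq, hardirq, | 
|  |  | * NMI): a software event raised while we are already processing one in | 
|  |  | * the same context is dropped instead of recursing. | 
|  |  | */ | 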
| Peter Zijlstra | 96f6d44 | 2009-03-23 18:22:07 +0100 | [diff] [blame] | 3479 | static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) | 
|  | 3480 | { | 
|  | 3481 | if (in_nmi()) | 
|  | 3482 | return &cpuctx->recursion[3]; | 
|  | 3483 |  | 
|  | 3484 | if (in_irq()) | 
|  | 3485 | return &cpuctx->recursion[2]; | 
|  | 3486 |  | 
|  | 3487 | if (in_softirq()) | 
|  | 3488 | return &cpuctx->recursion[1]; | 
|  | 3489 |  | 
|  | 3490 | return &cpuctx->recursion[0]; | 
|  | 3491 | } | 
|  | 3492 |  | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3493 | static void do_perf_swcounter_event(enum perf_type_id type, u32 event, | 
|  | 3494 | u64 nr, int nmi, | 
|  | 3495 | struct perf_sample_data *data) | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3496 | { | 
|  | 3497 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | 
| Peter Zijlstra | 96f6d44 | 2009-03-23 18:22:07 +0100 | [diff] [blame] | 3498 | int *recursion = perf_swcounter_recursion_context(cpuctx); | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 3499 | struct perf_counter_context *ctx; | 
| Peter Zijlstra | 96f6d44 | 2009-03-23 18:22:07 +0100 | [diff] [blame] | 3500 |  | 
|  | 3501 | if (*recursion) | 
|  | 3502 | goto out; | 
|  | 3503 |  | 
|  | 3504 | (*recursion)++; | 
|  | 3505 | barrier(); | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3506 |  | 
| Peter Zijlstra | 78f13e9 | 2009-04-08 15:01:33 +0200 | [diff] [blame] | 3507 | perf_swcounter_ctx_event(&cpuctx->ctx, type, event, | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3508 | nr, nmi, data); | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 3509 | rcu_read_lock(); | 
|  | 3510 | /* | 
|  | 3511 | * doesn't really matter which of the child contexts the | 
|  | 3512 | * event ends up in. | 
|  | 3513 | */ | 
|  | 3514 | ctx = rcu_dereference(current->perf_counter_ctxp); | 
|  | 3515 | if (ctx) | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3516 | perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data); | 
| Peter Zijlstra | 665c214 | 2009-05-29 14:51:57 +0200 | [diff] [blame] | 3517 | rcu_read_unlock(); | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3518 |  | 
| Peter Zijlstra | 96f6d44 | 2009-03-23 18:22:07 +0100 | [diff] [blame] | 3519 | barrier(); | 
|  | 3520 | (*recursion)--; | 
|  | 3521 |  | 
|  | 3522 | out: | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3523 | put_cpu_var(perf_cpu_context); | 
|  | 3524 | } | 
|  | 3525 |  | 
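|  |  | /* | 
|  |  | * Entry point for in-kernel software events; a sketch of a typical | 
|  |  | * caller (fault/sched style arguments): | 
|  |  | * | 
|  |  | *   __perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); | 
|  |  | */ | 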
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 3526 | void __perf_swcounter_event(u32 event, u64 nr, int nmi, | 
|  | 3527 | struct pt_regs *regs, u64 addr) | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3528 | { | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3529 | struct perf_sample_data data = { | 
|  | 3530 | .regs = regs, | 
|  | 3531 | .addr = addr, | 
|  | 3532 | }; | 
|  | 3533 |  | 
|  | 3534 | do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data); | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3535 | } | 
|  | 3536 |  | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3537 | static void perf_swcounter_read(struct perf_counter *counter) | 
|  | 3538 | { | 
|  | 3539 | perf_swcounter_update(counter); | 
|  | 3540 | } | 
|  | 3541 |  | 
|  | 3542 | static int perf_swcounter_enable(struct perf_counter *counter) | 
|  | 3543 | { | 
|  | 3544 | perf_swcounter_set_period(counter); | 
|  | 3545 | return 0; | 
|  | 3546 | } | 
|  | 3547 |  | 
|  | 3548 | static void perf_swcounter_disable(struct perf_counter *counter) | 
|  | 3549 | { | 
|  | 3550 | perf_swcounter_update(counter); | 
|  | 3551 | } | 
|  | 3552 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3553 | static const struct pmu perf_ops_generic = { | 
| Peter Zijlstra | ac17dc8 | 2009-03-13 12:21:34 +0100 | [diff] [blame] | 3554 | .enable		= perf_swcounter_enable, | 
|  | 3555 | .disable	= perf_swcounter_disable, | 
|  | 3556 | .read		= perf_swcounter_read, | 
|  | 3557 | }; | 
|  | 3558 |  | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3559 | /* | 
|  | 3560 | * Software counter: cpu wall time clock | 
|  | 3561 | */ | 
|  | 3562 |  | 
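|  |  | /* | 
|  |  | * Counts cpu_clock() of the CPU the counter runs on; when a sample | 
|  |  | * period is set the samples are driven by an hrtimer instead of a PMU | 
|  |  | * interrupt. | 
|  |  | */ | 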
| Paul Mackerras | 9abf8a0 | 2009-01-09 16:26:43 +1100 | [diff] [blame] | 3563 | static void cpu_clock_perf_counter_update(struct perf_counter *counter) | 
|  | 3564 | { | 
|  | 3565 | int cpu = raw_smp_processor_id(); | 
|  | 3566 | s64 prev; | 
|  | 3567 | u64 now; | 
|  | 3568 |  | 
|  | 3569 | now = cpu_clock(cpu); | 
|  | 3570 | prev = atomic64_read(&counter->hw.prev_count); | 
|  | 3571 | atomic64_set(&counter->hw.prev_count, now); | 
|  | 3572 | atomic64_add(now - prev, &counter->count); | 
|  | 3573 | } | 
|  | 3574 |  | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3575 | static int cpu_clock_perf_counter_enable(struct perf_counter *counter) | 
|  | 3576 | { | 
|  | 3577 | struct hw_perf_counter *hwc = &counter->hw; | 
|  | 3578 | int cpu = raw_smp_processor_id(); | 
|  | 3579 |  | 
|  | 3580 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | 
| Peter Zijlstra | 039fc91 | 2009-03-13 16:43:47 +0100 | [diff] [blame] | 3581 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 
|  | 3582 | hwc->hrtimer.function = perf_swcounter_hrtimer; | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 3583 | if (hwc->sample_period) { | 
|  | 3584 | u64 period = max_t(u64, 10000, hwc->sample_period); | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3585 | __hrtimer_start_range_ns(&hwc->hrtimer, | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3586 | ns_to_ktime(period), 0, | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3587 | HRTIMER_MODE_REL, 0); | 
|  | 3588 | } | 
|  | 3589 |  | 
|  | 3590 | return 0; | 
|  | 3591 | } | 
|  | 3592 |  | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3593 | static void cpu_clock_perf_counter_disable(struct perf_counter *counter) | 
|  | 3594 | { | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 3595 | if (counter->hw.sample_period) | 
| Peter Zijlstra | b986d7e | 2009-05-20 12:21:21 +0200 | [diff] [blame] | 3596 | hrtimer_cancel(&counter->hw.hrtimer); | 
| Paul Mackerras | 9abf8a0 | 2009-01-09 16:26:43 +1100 | [diff] [blame] | 3597 | cpu_clock_perf_counter_update(counter); | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3598 | } | 
|  | 3599 |  | 
|  | 3600 | static void cpu_clock_perf_counter_read(struct perf_counter *counter) | 
|  | 3601 | { | 
| Paul Mackerras | 9abf8a0 | 2009-01-09 16:26:43 +1100 | [diff] [blame] | 3602 | cpu_clock_perf_counter_update(counter); | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3603 | } | 
|  | 3604 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3605 | static const struct pmu perf_ops_cpu_clock = { | 
| Ingo Molnar | 7671581 | 2008-12-17 14:20:28 +0100 | [diff] [blame] | 3606 | .enable		= cpu_clock_perf_counter_enable, | 
|  | 3607 | .disable	= cpu_clock_perf_counter_disable, | 
|  | 3608 | .read		= cpu_clock_perf_counter_read, | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3609 | }; | 
|  | 3610 |  | 
| Ingo Molnar | aa9c4c0 | 2008-12-17 14:10:57 +0100 | [diff] [blame] | 3611 | /* | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3612 | * Software counter: task time clock | 
|  | 3613 | */ | 
|  | 3614 |  | 
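|  |  | /* | 
|  |  | * Like the cpu clock counter, but counts ctx->time, i.e. only the time | 
|  |  | * during which the monitored context was actually scheduled in. | 
|  |  | */ | 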
| Peter Zijlstra | e30e08f | 2009-04-08 15:01:25 +0200 | [diff] [blame] | 3615 | static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) | 
| Ingo Molnar | bae43c9 | 2008-12-11 14:03:20 +0100 | [diff] [blame] | 3616 | { | 
| Peter Zijlstra | e30e08f | 2009-04-08 15:01:25 +0200 | [diff] [blame] | 3617 | u64 prev; | 
| Ingo Molnar | 8cb391e | 2008-12-14 12:22:31 +0100 | [diff] [blame] | 3618 | s64 delta; | 
| Ingo Molnar | bae43c9 | 2008-12-11 14:03:20 +0100 | [diff] [blame] | 3619 |  | 
| Peter Zijlstra | a39d6f2 | 2009-04-06 11:45:11 +0200 | [diff] [blame] | 3620 | prev = atomic64_xchg(&counter->hw.prev_count, now); | 
| Ingo Molnar | 8cb391e | 2008-12-14 12:22:31 +0100 | [diff] [blame] | 3621 | delta = now - prev; | 
| Ingo Molnar | 8cb391e | 2008-12-14 12:22:31 +0100 | [diff] [blame] | 3622 | atomic64_add(delta, &counter->count); | 
| Ingo Molnar | bae43c9 | 2008-12-11 14:03:20 +0100 | [diff] [blame] | 3623 | } | 
|  | 3624 |  | 
| Ingo Molnar | 95cdd2e | 2008-12-21 13:50:42 +0100 | [diff] [blame] | 3625 | static int task_clock_perf_counter_enable(struct perf_counter *counter) | 
| Ingo Molnar | 8cb391e | 2008-12-14 12:22:31 +0100 | [diff] [blame] | 3626 | { | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3627 | struct hw_perf_counter *hwc = &counter->hw; | 
| Peter Zijlstra | a39d6f2 | 2009-04-06 11:45:11 +0200 | [diff] [blame] | 3628 | u64 now; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3629 |  | 
| Peter Zijlstra | a39d6f2 | 2009-04-06 11:45:11 +0200 | [diff] [blame] | 3630 | now = counter->ctx->time; | 
|  | 3631 |  | 
|  | 3632 | atomic64_set(&hwc->prev_count, now); | 
| Peter Zijlstra | 039fc91 | 2009-03-13 16:43:47 +0100 | [diff] [blame] | 3633 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 
|  | 3634 | hwc->hrtimer.function = perf_swcounter_hrtimer; | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 3635 | if (hwc->sample_period) { | 
|  | 3636 | u64 period = max_t(u64, 10000, hwc->sample_period); | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3637 | __hrtimer_start_range_ns(&hwc->hrtimer, | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3638 | ns_to_ktime(period), 0, | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3639 | HRTIMER_MODE_REL, 0); | 
|  | 3640 | } | 
| Ingo Molnar | 95cdd2e | 2008-12-21 13:50:42 +0100 | [diff] [blame] | 3641 |  | 
|  | 3642 | return 0; | 
| Ingo Molnar | 8cb391e | 2008-12-14 12:22:31 +0100 | [diff] [blame] | 3643 | } | 
|  | 3644 |  | 
|  | 3645 | static void task_clock_perf_counter_disable(struct perf_counter *counter) | 
|  | 3646 | { | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 3647 | if (counter->hw.sample_period) | 
| Peter Zijlstra | b986d7e | 2009-05-20 12:21:21 +0200 | [diff] [blame] | 3648 | hrtimer_cancel(&counter->hw.hrtimer); | 
| Peter Zijlstra | e30e08f | 2009-04-08 15:01:25 +0200 | [diff] [blame] | 3649 | task_clock_perf_counter_update(counter, counter->ctx->time); | 
|  | 3650 |  | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3651 | } | 
| Ingo Molnar | aa9c4c0 | 2008-12-17 14:10:57 +0100 | [diff] [blame] | 3652 |  | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3653 | static void task_clock_perf_counter_read(struct perf_counter *counter) | 
|  | 3654 | { | 
| Peter Zijlstra | e30e08f | 2009-04-08 15:01:25 +0200 | [diff] [blame] | 3655 | u64 time; | 
|  | 3656 |  | 
|  | 3657 | if (!in_nmi()) { | 
|  | 3658 | update_context_time(counter->ctx); | 
|  | 3659 | time = counter->ctx->time; | 
|  | 3660 | } else { | 
|  | 3661 | u64 now = perf_clock(); | 
|  | 3662 | u64 delta = now - counter->ctx->timestamp; | 
|  | 3663 | time = counter->ctx->time + delta; | 
|  | 3664 | } | 
|  | 3665 |  | 
|  | 3666 | task_clock_perf_counter_update(counter, time); | 
| Ingo Molnar | bae43c9 | 2008-12-11 14:03:20 +0100 | [diff] [blame] | 3667 | } | 
|  | 3668 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3669 | static const struct pmu perf_ops_task_clock = { | 
| Ingo Molnar | 7671581 | 2008-12-17 14:20:28 +0100 | [diff] [blame] | 3670 | .enable		= task_clock_perf_counter_enable, | 
|  | 3671 | .disable	= task_clock_perf_counter_disable, | 
|  | 3672 | .read		= task_clock_perf_counter_read, | 
| Ingo Molnar | bae43c9 | 2008-12-11 14:03:20 +0100 | [diff] [blame] | 3673 | }; | 
|  | 3674 |  | 
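|  |  | /* | 
|  |  | * Tracepoint counters: with CONFIG_EVENT_PROFILE the ftrace tracepoint | 
|  |  | * selected by attr->config is enabled for profiling and every hit is | 
|  |  | * fed into the generic software counter path via perf_tpcounter_event(). | 
|  |  | */ | 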
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 3675 | #ifdef CONFIG_EVENT_PROFILE | 
|  | 3676 | void perf_tpcounter_event(int event_id) | 
|  | 3677 | { | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3678 | struct perf_sample_data data = { | 
| Chris Wilson | d4d7d0b | 2009-07-06 09:31:33 +0100 | [diff] [blame] | 3679 | .regs = get_irq_regs(), | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3680 | .addr = 0, | 
|  | 3681 | }; | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3682 |  | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3683 | if (!data.regs) | 
|  | 3684 | data.regs = task_pt_regs(current); | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3685 |  | 
| Peter Zijlstra | 92bf309 | 2009-06-19 18:11:53 +0200 | [diff] [blame] | 3686 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data); | 
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 3687 | } | 
| Steven Whitehouse | ff7b1b4 | 2009-04-15 16:55:05 +0100 | [diff] [blame] | 3688 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); | 
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 3689 |  | 
|  | 3690 | extern int ftrace_profile_enable(int); | 
|  | 3691 | extern void ftrace_profile_disable(int); | 
|  | 3692 |  | 
|  | 3693 | static void tp_perf_counter_destroy(struct perf_counter *counter) | 
|  | 3694 | { | 
| Chris Wilson | d4d7d0b | 2009-07-06 09:31:33 +0100 | [diff] [blame] | 3695 | ftrace_profile_disable(counter->attr.config); | 
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 3696 | } | 
|  | 3697 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3698 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | 
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 3699 | { | 
| Chris Wilson | d4d7d0b | 2009-07-06 09:31:33 +0100 | [diff] [blame] | 3700 | if (ftrace_profile_enable(counter->attr.config)) | 
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 3701 | return NULL; | 
|  | 3702 |  | 
|  | 3703 | counter->destroy = tp_perf_counter_destroy; | 
|  | 3704 |  | 
|  | 3705 | return &perf_ops_generic; | 
|  | 3706 | } | 
|  | 3707 | #else | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3708 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | 
| Peter Zijlstra | e077df4 | 2009-03-19 20:26:17 +0100 | [diff] [blame] | 3709 | { | 
|  | 3710 | return NULL; | 
|  | 3711 | } | 
|  | 3712 | #endif | 
|  | 3713 |  | 
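|  |  | /* | 
|  |  | * Usage counts per software event type, so callers can skip the event | 
|  |  | * path entirely when no counter of that type exists; bumped by | 
|  |  | * sw_perf_counter_init() for non-inherited counters and dropped again | 
|  |  | * by sw_perf_counter_destroy(). | 
|  |  | */ | 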
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 3714 | atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; | 
|  | 3715 |  | 
|  | 3716 | static void sw_perf_counter_destroy(struct perf_counter *counter) | 
|  | 3717 | { | 
|  | 3718 | u64 event = counter->attr.config; | 
|  | 3719 |  | 
| Peter Zijlstra | f344011 | 2009-06-22 13:58:35 +0200 | [diff] [blame] | 3720 | WARN_ON(counter->parent); | 
|  | 3721 |  | 
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 3722 | atomic_dec(&perf_swcounter_enabled[event]); | 
|  | 3723 | } | 
|  | 3724 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3725 | static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3726 | { | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3727 | const struct pmu *pmu = NULL; | 
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 3728 | u64 event = counter->attr.config; | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3729 |  | 
| Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 3730 | /* | 
|  | 3731 | * Software counters (currently) can't in general distinguish | 
|  | 3732 | * between user, kernel and hypervisor events. | 
|  | 3733 | * However, context switches and cpu migrations are considered | 
|  | 3734 | * to be kernel events, and page faults are never hypervisor | 
|  | 3735 | * events. | 
|  | 3736 | */ | 
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 3737 | switch (event) { | 
| Peter Zijlstra | f4dbfa8 | 2009-06-11 14:06:28 +0200 | [diff] [blame] | 3738 | case PERF_COUNT_SW_CPU_CLOCK: | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3739 | pmu = &perf_ops_cpu_clock; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3740 |  | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3741 | break; | 
| Peter Zijlstra | f4dbfa8 | 2009-06-11 14:06:28 +0200 | [diff] [blame] | 3742 | case PERF_COUNT_SW_TASK_CLOCK: | 
| Paul Mackerras | 23a185c | 2009-02-09 22:42:47 +1100 | [diff] [blame] | 3743 | /* | 
|  | 3744 | * If the user instantiates this as a per-cpu counter, | 
|  | 3745 | * use the cpu_clock counter instead. | 
|  | 3746 | */ | 
|  | 3747 | if (counter->ctx->task) | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3748 | pmu = &perf_ops_task_clock; | 
| Paul Mackerras | 23a185c | 2009-02-09 22:42:47 +1100 | [diff] [blame] | 3749 | else | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3750 | pmu = &perf_ops_cpu_clock; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 3751 |  | 
| Ingo Molnar | bae43c9 | 2008-12-11 14:03:20 +0100 | [diff] [blame] | 3752 | break; | 
| Peter Zijlstra | f4dbfa8 | 2009-06-11 14:06:28 +0200 | [diff] [blame] | 3753 | case PERF_COUNT_SW_PAGE_FAULTS: | 
|  | 3754 | case PERF_COUNT_SW_PAGE_FAULTS_MIN: | 
|  | 3755 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | 
|  | 3756 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | 
|  | 3757 | case PERF_COUNT_SW_CPU_MIGRATIONS: | 
| Peter Zijlstra | f344011 | 2009-06-22 13:58:35 +0200 | [diff] [blame] | 3758 | if (!counter->parent) { | 
|  | 3759 | atomic_inc(&perf_swcounter_enabled[event]); | 
|  | 3760 | counter->destroy = sw_perf_counter_destroy; | 
|  | 3761 | } | 
| Paul Mackerras | 3f731ca | 2009-06-01 17:52:30 +1000 | [diff] [blame] | 3762 | pmu = &perf_ops_generic; | 
| Ingo Molnar | 6c594c2 | 2008-12-14 12:34:15 +0100 | [diff] [blame] | 3763 | break; | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3764 | } | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 3765 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3766 | return pmu; | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 3767 | } | 
|  | 3768 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3769 | /* | 
|  | 3770 | * Allocate and initialize a counter structure | 
|  | 3771 | */ | 
|  | 3772 | static struct perf_counter * | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3773 | perf_counter_alloc(struct perf_counter_attr *attr, | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 3774 | int cpu, | 
| Paul Mackerras | 23a185c | 2009-02-09 22:42:47 +1100 | [diff] [blame] | 3775 | struct perf_counter_context *ctx, | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 3776 | struct perf_counter *group_leader, | 
| Peter Zijlstra | b84fbc9 | 2009-06-22 13:57:40 +0200 | [diff] [blame] | 3777 | struct perf_counter *parent_counter, | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 3778 | gfp_t gfpflags) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3779 | { | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3780 | const struct pmu *pmu; | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 3781 | struct perf_counter *counter; | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3782 | struct hw_perf_counter *hwc; | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 3783 | long err; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3784 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 3785 | counter = kzalloc(sizeof(*counter), gfpflags); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3786 | if (!counter) | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 3787 | return ERR_PTR(-ENOMEM); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3788 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 3789 | /* | 
|  | 3790 | * Single counters are their own group leaders, with an | 
|  | 3791 | * empty sibling list: | 
|  | 3792 | */ | 
|  | 3793 | if (!group_leader) | 
|  | 3794 | group_leader = counter; | 
|  | 3795 |  | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 3796 | mutex_init(&counter->child_mutex); | 
|  | 3797 | INIT_LIST_HEAD(&counter->child_list); | 
|  | 3798 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 3799 | INIT_LIST_HEAD(&counter->list_entry); | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 3800 | INIT_LIST_HEAD(&counter->event_entry); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 3801 | INIT_LIST_HEAD(&counter->sibling_list); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3802 | init_waitqueue_head(&counter->waitq); | 
|  | 3803 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 3804 | mutex_init(&counter->mmap_mutex); | 
|  | 3805 |  | 
| Peter Zijlstra | a96bbc1 | 2009-06-03 14:01:36 +0200 | [diff] [blame] | 3806 | counter->cpu		= cpu; | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3807 | counter->attr		= *attr; | 
| Peter Zijlstra | a96bbc1 | 2009-06-03 14:01:36 +0200 | [diff] [blame] | 3808 | counter->group_leader	= group_leader; | 
|  | 3809 | counter->pmu		= NULL; | 
|  | 3810 | counter->ctx		= ctx; | 
|  | 3811 | counter->oncpu		= -1; | 
| Ingo Molnar | 329d876 | 2009-05-26 08:10:00 +0200 | [diff] [blame] | 3812 |  | 
| Peter Zijlstra | b84fbc9 | 2009-06-22 13:57:40 +0200 | [diff] [blame] | 3813 | counter->parent		= parent_counter; | 
|  | 3814 |  | 
| Peter Zijlstra | a96bbc1 | 2009-06-03 14:01:36 +0200 | [diff] [blame] | 3815 | counter->ns		= get_pid_ns(current->nsproxy->pid_ns); | 
|  | 3816 | counter->id		= atomic64_inc_return(&perf_counter_id); | 
|  | 3817 |  | 
|  | 3818 | counter->state		= PERF_COUNTER_STATE_INACTIVE; | 
|  | 3819 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3820 | if (attr->disabled) | 
| Ingo Molnar | a86ed50 | 2008-12-17 00:43:10 +0100 | [diff] [blame] | 3821 | counter->state = PERF_COUNTER_STATE_OFF; | 
|  | 3822 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3823 | pmu = NULL; | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3824 |  | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3825 | hwc = &counter->hw; | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 3826 | hwc->sample_period = attr->sample_period; | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3827 | if (attr->freq && attr->sample_freq) | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 3828 | hwc->sample_period = 1; | 
|  | 3829 |  | 
|  | 3830 | atomic64_set(&hwc->period_left, hwc->sample_period); | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 3831 |  | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 3832 | /* | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 3833 | * we currently do not support PERF_SAMPLE_GROUP on inherited counters | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 3834 | */ | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3835 | if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) | 
| Peter Zijlstra | 2023b35 | 2009-05-05 17:50:26 +0200 | [diff] [blame] | 3836 | goto done; | 
|  | 3837 |  | 
| Ingo Molnar | a21ca2c | 2009-06-06 09:58:57 +0200 | [diff] [blame] | 3838 | switch (attr->type) { | 
| Peter Zijlstra | 081fad8 | 2009-06-11 17:57:21 +0200 | [diff] [blame] | 3839 | case PERF_TYPE_RAW: | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3840 | case PERF_TYPE_HARDWARE: | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 3841 | case PERF_TYPE_HW_CACHE: | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3842 | pmu = hw_perf_counter_init(counter); | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3843 | break; | 
|  | 3844 |  | 
|  | 3845 | case PERF_TYPE_SOFTWARE: | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3846 | pmu = sw_perf_counter_init(counter); | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3847 | break; | 
|  | 3848 |  | 
|  | 3849 | case PERF_TYPE_TRACEPOINT: | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3850 | pmu = tp_perf_counter_init(counter); | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3851 | break; | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 3852 |  | 
|  | 3853 | default: | 
|  | 3854 | break; | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 3855 | } | 
| Peter Zijlstra | f4a2deb | 2009-03-23 18:22:06 +0100 | [diff] [blame] | 3856 | done: | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 3857 | err = 0; | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3858 | if (!pmu) | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 3859 | err = -EINVAL; | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3860 | else if (IS_ERR(pmu)) | 
|  | 3861 | err = PTR_ERR(pmu); | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 3862 |  | 
|  | 3863 | if (err) { | 
| Peter Zijlstra | a96bbc1 | 2009-06-03 14:01:36 +0200 | [diff] [blame] | 3864 | if (counter->ns) | 
|  | 3865 | put_pid_ns(counter->ns); | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 3866 | kfree(counter); | 
|  | 3867 | return ERR_PTR(err); | 
|  | 3868 | } | 
|  | 3869 |  | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 3870 | counter->pmu = pmu; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3871 |  | 
| Peter Zijlstra | f344011 | 2009-06-22 13:58:35 +0200 | [diff] [blame] | 3872 | if (!counter->parent) { | 
|  | 3873 | atomic_inc(&nr_counters); | 
|  | 3874 | if (counter->attr.mmap) | 
|  | 3875 | atomic_inc(&nr_mmap_counters); | 
|  | 3876 | if (counter->attr.comm) | 
|  | 3877 | atomic_inc(&nr_comm_counters); | 
|  | 3878 | } | 
| Peter Zijlstra | 9ee318a | 2009-04-09 10:53:44 +0200 | [diff] [blame] | 3879 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3880 | return counter; | 
|  | 3881 | } | 
|  | 3882 |  | 
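When frequency-based sampling is requested, the allocation path above deliberately starts the period at 1 (attr->freq set together with a nonzero attr->sample_freq) and the kernel re-tunes hw.sample_period at runtime so the sample rate approaches attr->sample_freq; a fixed sample_period is taken verbatim. A small illustrative fragment of the two userspace-side choices (field names are the ones already referenced in this file; in the attr of this era sample_period and sample_freq are expected to share storage, so only one of the two is meaningful at a time):

	/* Hypothetical userspace attr setup -- not part of this file. */
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));

	/* either: a fixed period, one sample every 100000 events ... */
	attr.sample_period = 100000;

	/* ... or: frequency based, aim for roughly 1000 samples per second */
	attr.freq        = 1;
	attr.sample_freq = 1000;	/* overwrites sample_period above; pick one */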
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 3883 | static int perf_copy_attr(struct perf_counter_attr __user *uattr, | 
|  | 3884 | struct perf_counter_attr *attr) | 
|  | 3885 | { | 
|  | 3886 | int ret; | 
|  | 3887 | u32 size; | 
|  | 3888 |  | 
|  | 3889 | if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) | 
|  | 3890 | return -EFAULT; | 
|  | 3891 |  | 
|  | 3892 | /* | 
|  | 3893 | * zero the full structure, so that a short copy leaves the remaining fields zeroed. | 
|  | 3894 | */ | 
|  | 3895 | memset(attr, 0, sizeof(*attr)); | 
|  | 3896 |  | 
|  | 3897 | ret = get_user(size, &uattr->size); | 
|  | 3898 | if (ret) | 
|  | 3899 | return ret; | 
|  | 3900 |  | 
|  | 3901 | if (size > PAGE_SIZE)	/* silly large */ | 
|  | 3902 | goto err_size; | 
|  | 3903 |  | 
|  | 3904 | if (!size)		/* abi compat */ | 
|  | 3905 | size = PERF_ATTR_SIZE_VER0; | 
|  | 3906 |  | 
|  | 3907 | if (size < PERF_ATTR_SIZE_VER0) | 
|  | 3908 | goto err_size; | 
|  | 3909 |  | 
|  | 3910 | /* | 
|  | 3911 | * If we're handed a bigger struct than we know of, | 
|  | 3912 | * ensure all the unknown bits are 0. | 
|  | 3913 | */ | 
|  | 3914 | if (size > sizeof(*attr)) { | 
|  | 3915 | unsigned long val; | 
|  | 3916 | unsigned long __user *addr; | 
|  | 3917 | unsigned long __user *end; | 
|  | 3918 |  | 
|  | 3919 | addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr), | 
|  | 3920 | sizeof(unsigned long)); | 
|  | 3921 | end  = PTR_ALIGN((void __user *)uattr + size, | 
|  | 3922 | sizeof(unsigned long)); | 
|  | 3923 |  | 
|  | 3924 | for (; addr < end; addr++) { | 
|  | 3925 | ret = get_user(val, addr); | 
|  | 3926 | if (ret) | 
|  | 3927 | return ret; | 
|  | 3928 | if (val) | 
|  | 3929 | goto err_size; | 
|  | 3930 | } | 
|  | 3931 | } | 
|  | 3932 |  | 
|  | 3933 | ret = copy_from_user(attr, uattr, size); | 
|  | 3934 | if (ret) | 
|  | 3935 | return -EFAULT; | 
|  | 3936 |  | 
|  | 3937 | /* | 
|  | 3938 | * If the type is known, the type-specific counter initialization | 
|  | 3939 | * will verify attr->config. | 
|  | 3940 | */ | 
|  | 3941 | if (attr->type >= PERF_TYPE_MAX) | 
|  | 3942 | return -EINVAL; | 
|  | 3943 |  | 
|  | 3944 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) | 
|  | 3945 | return -EINVAL; | 
|  | 3946 |  | 
|  | 3947 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) | 
|  | 3948 | return -EINVAL; | 
|  | 3949 |  | 
|  | 3950 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) | 
|  | 3951 | return -EINVAL; | 
|  | 3952 |  | 
|  | 3953 | out: | 
|  | 3954 | return ret; | 
|  | 3955 |  | 
|  | 3956 | err_size: | 
|  | 3957 | put_user(sizeof(*attr), &uattr->size); | 
|  | 3958 | ret = -E2BIG; | 
|  | 3959 | goto out; | 
|  | 3960 | } | 
|  | 3961 |  | 
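The size handshake implemented above keeps the attr ABI extensible in both directions: a short (older) attr is zero-extended by the memset, while a longer (newer) attr is accepted only if every byte past the kernel's known size is zero, otherwise the caller gets -E2BIG together with the size the kernel does understand. A minimal userspace-side sketch of the convention (illustrative only; it assumes the struct perf_counter_attr definition exported by <linux/perf_counter.h> in this era):

	/* Hypothetical userspace sketch -- not part of this file. */
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));	/* fields the caller doesn't know about stay zero */
	attr.size = sizeof(attr);	/* tell the kernel which revision of the struct was filled in */

A caller built against newer headers than the running kernel can then fall back: on -E2BIG the kernel has written its own sizeof(*attr) back into attr.size, so retrying with that smaller size is expected to succeed, with the newer fields simply ignored.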
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3962 | /** | 
| Paul Mackerras | 2743a5b | 2009-03-04 20:36:51 +1100 | [diff] [blame] | 3963 | * sys_perf_counter_open - open a performance counter, associate it with a task/cpu | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 3964 | * | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3965 | * @attr_uptr:	event type attributes for monitoring/sampling | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3966 | * @pid:		target pid | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 3967 | * @cpu:		target cpu | 
|  | 3968 | * @group_fd:		group leader counter fd | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3969 | */ | 
| Paul Mackerras | 2743a5b | 2009-03-04 20:36:51 +1100 | [diff] [blame] | 3970 | SYSCALL_DEFINE5(perf_counter_open, | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 3971 | struct perf_counter_attr __user *, attr_uptr, | 
| Paul Mackerras | 2743a5b | 2009-03-04 20:36:51 +1100 | [diff] [blame] | 3972 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3973 | { | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 3974 | struct perf_counter *counter, *group_leader; | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 3975 | struct perf_counter_attr attr; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 3976 | struct perf_counter_context *ctx; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 3977 | struct file *counter_file = NULL; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 3978 | struct file *group_file = NULL; | 
|  | 3979 | int fput_needed = 0; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 3980 | int fput_needed2 = 0; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3981 | int ret; | 
|  | 3982 |  | 
| Paul Mackerras | 2743a5b | 2009-03-04 20:36:51 +1100 | [diff] [blame] | 3983 | /* for future expandability... */ | 
|  | 3984 | if (flags) | 
|  | 3985 | return -EINVAL; | 
|  | 3986 |  | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 3987 | ret = perf_copy_attr(attr_uptr, &attr); | 
|  | 3988 | if (ret) | 
|  | 3989 | return ret; | 
| Thomas Gleixner | eab656a | 2008-12-08 19:26:59 +0100 | [diff] [blame] | 3990 |  | 
| Peter Zijlstra | 0764771 | 2009-06-11 11:18:36 +0200 | [diff] [blame] | 3991 | if (!attr.exclude_kernel) { | 
|  | 3992 | if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) | 
|  | 3993 | return -EACCES; | 
|  | 3994 | } | 
|  | 3995 |  | 
| Peter Zijlstra | df58ab2 | 2009-06-11 11:25:05 +0200 | [diff] [blame] | 3996 | if (attr.freq) { | 
|  | 3997 | if (attr.sample_freq > sysctl_perf_counter_sample_rate) | 
|  | 3998 | return -EINVAL; | 
|  | 3999 | } | 
|  | 4000 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4001 | /* | 
| Ingo Molnar | ccff286 | 2008-12-11 11:26:29 +0100 | [diff] [blame] | 4002 | * Get the target context (task or percpu): | 
|  | 4003 | */ | 
|  | 4004 | ctx = find_get_context(pid, cpu); | 
|  | 4005 | if (IS_ERR(ctx)) | 
|  | 4006 | return PTR_ERR(ctx); | 
|  | 4007 |  | 
|  | 4008 | /* | 
|  | 4009 | * Look up the group leader (we will attach this counter to it): | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4010 | */ | 
|  | 4011 | group_leader = NULL; | 
|  | 4012 | if (group_fd != -1) { | 
|  | 4013 | ret = -EINVAL; | 
|  | 4014 | group_file = fget_light(group_fd, &fput_needed); | 
|  | 4015 | if (!group_file) | 
| Ingo Molnar | ccff286 | 2008-12-11 11:26:29 +0100 | [diff] [blame] | 4016 | goto err_put_context; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4017 | if (group_file->f_op != &perf_fops) | 
| Ingo Molnar | ccff286 | 2008-12-11 11:26:29 +0100 | [diff] [blame] | 4018 | goto err_put_context; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4019 |  | 
|  | 4020 | group_leader = group_file->private_data; | 
|  | 4021 | /* | 
| Ingo Molnar | ccff286 | 2008-12-11 11:26:29 +0100 | [diff] [blame] | 4022 | * Do not allow a recursive hierarchy (this new sibling | 
|  | 4023 | * becoming part of another group-sibling): | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4024 | */ | 
| Ingo Molnar | ccff286 | 2008-12-11 11:26:29 +0100 | [diff] [blame] | 4025 | if (group_leader->group_leader != group_leader) | 
|  | 4026 | goto err_put_context; | 
|  | 4027 | /* | 
|  | 4028 | * Do not allow attaching to a group in a different | 
|  | 4029 | * task or CPU context: | 
|  | 4030 | */ | 
|  | 4031 | if (group_leader->ctx != ctx) | 
|  | 4032 | goto err_put_context; | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 4033 | /* | 
|  | 4034 | * Only a group leader can be exclusive or pinned | 
|  | 4035 | */ | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 4036 | if (attr.exclusive || attr.pinned) | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 4037 | goto err_put_context; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4038 | } | 
|  | 4039 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 4040 | counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, | 
| Peter Zijlstra | b84fbc9 | 2009-06-22 13:57:40 +0200 | [diff] [blame] | 4041 | NULL, GFP_KERNEL); | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 4042 | ret = PTR_ERR(counter); | 
|  | 4043 | if (IS_ERR(counter)) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4044 | goto err_put_context; | 
|  | 4045 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4046 | ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); | 
|  | 4047 | if (ret < 0) | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4048 | goto err_free_put_context; | 
|  | 4049 |  | 
|  | 4050 | counter_file = fget_light(ret, &fput_needed2); | 
|  | 4051 | if (!counter_file) | 
|  | 4052 | goto err_free_put_context; | 
|  | 4053 |  | 
|  | 4054 | counter->filp = counter_file; | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4055 | WARN_ON_ONCE(ctx->parent_ctx); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4056 | mutex_lock(&ctx->mutex); | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4057 | perf_install_in_context(ctx, counter, cpu); | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4058 | ++ctx->generation; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4059 | mutex_unlock(&ctx->mutex); | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4060 |  | 
| Peter Zijlstra | 082ff5a | 2009-05-23 18:29:00 +0200 | [diff] [blame] | 4061 | counter->owner = current; | 
|  | 4062 | get_task_struct(current); | 
|  | 4063 | mutex_lock(&current->perf_counter_mutex); | 
|  | 4064 | list_add_tail(&counter->owner_entry, &current->perf_counter_list); | 
|  | 4065 | mutex_unlock(&current->perf_counter_mutex); | 
|  | 4066 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4067 | fput_light(counter_file, fput_needed2); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4068 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4069 | out_fput: | 
|  | 4070 | fput_light(group_file, fput_needed); | 
|  | 4071 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4072 | return ret; | 
|  | 4073 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4074 | err_free_put_context: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4075 | kfree(counter); | 
|  | 4076 |  | 
|  | 4077 | err_put_context: | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 4078 | put_ctx(ctx); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4079 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4080 | goto out_fput; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4081 | } | 
|  | 4082 |  | 
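There is no libc wrapper for this syscall at this point, so userspace is expected to go through syscall(2). A minimal usage sketch of the path above (it assumes __NR_perf_counter_open and the PERF_TYPE_HARDWARE/PERF_COUNT_HW_CPU_CYCLES constants exported by the kernel headers of this era; pid 0 means the current task, cpu -1 means any CPU, group_fd -1 means no group leader, and flags must be 0):

	/* Hypothetical userspace usage sketch -- not part of this file. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	int main(void)
	{
		struct perf_counter_attr attr;
		unsigned long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_counter_open");
			return 1;
		}

		/* ... run the workload to be measured ... */

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("cycles: %llu\n", count);

		close(fd);
		return 0;
	}

The returned file descriptor behaves like the anon inode installed above: read() yields the current 64-bit count (plus whatever read_format asks for), and dropping the last reference releases the counter.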
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4083 | /* | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4084 | * inherit a counter from parent task to child task: | 
|  | 4085 | */ | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4086 | static struct perf_counter * | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4087 | inherit_counter(struct perf_counter *parent_counter, | 
|  | 4088 | struct task_struct *parent, | 
|  | 4089 | struct perf_counter_context *parent_ctx, | 
|  | 4090 | struct task_struct *child, | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4091 | struct perf_counter *group_leader, | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4092 | struct perf_counter_context *child_ctx) | 
|  | 4093 | { | 
|  | 4094 | struct perf_counter *child_counter; | 
|  | 4095 |  | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4096 | /* | 
|  | 4097 | * Instead of creating recursive hierarchies of counters, | 
|  | 4098 | * we link inherited counters back to the original parent, | 
|  | 4099 | * which is guaranteed to have a filp that we use as the | 
|  | 4100 | * reference count: | 
|  | 4101 | */ | 
|  | 4102 | if (parent_counter->parent) | 
|  | 4103 | parent_counter = parent_counter->parent; | 
|  | 4104 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 4105 | child_counter = perf_counter_alloc(&parent_counter->attr, | 
| Paul Mackerras | 23a185c | 2009-02-09 22:42:47 +1100 | [diff] [blame] | 4106 | parent_counter->cpu, child_ctx, | 
| Peter Zijlstra | b84fbc9 | 2009-06-22 13:57:40 +0200 | [diff] [blame] | 4107 | group_leader, parent_counter, | 
|  | 4108 | GFP_KERNEL); | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 4109 | if (IS_ERR(child_counter)) | 
|  | 4110 | return child_counter; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 4111 | get_ctx(child_ctx); | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4112 |  | 
|  | 4113 | /* | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4114 | * Make the child state follow the state of the parent counter, | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 4115 | * not its attr.disabled bit.  We hold the parent's mutex, | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 4116 | * so we won't race with perf_counter_{en, dis}able_family. | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4117 | */ | 
|  | 4118 | if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) | 
|  | 4119 | child_counter->state = PERF_COUNTER_STATE_INACTIVE; | 
|  | 4120 | else | 
|  | 4121 | child_counter->state = PERF_COUNTER_STATE_OFF; | 
|  | 4122 |  | 
| Peter Zijlstra | bd2b5b1 | 2009-06-10 13:40:57 +0200 | [diff] [blame] | 4123 | if (parent_counter->attr.freq) | 
|  | 4124 | child_counter->hw.sample_period = parent_counter->hw.sample_period; | 
|  | 4125 |  | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4126 | /* | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4127 | * Link it up in the child's context: | 
|  | 4128 | */ | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 4129 | add_counter_to_ctx(child_counter, child_ctx); | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4130 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4131 | /* | 
|  | 4132 | * Get a reference to the parent filp - we will fput it | 
|  | 4133 | * when the child counter exits. This is safe to do because | 
|  | 4134 | * we are in the parent and we know that the filp still | 
|  | 4135 | * exists and has a nonzero count: | 
|  | 4136 | */ | 
|  | 4137 | atomic_long_inc(&parent_counter->filp->f_count); | 
|  | 4138 |  | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4139 | /* | 
|  | 4140 | * Link this into the parent counter's child list | 
|  | 4141 | */ | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4142 | WARN_ON_ONCE(parent_counter->ctx->parent_ctx); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 4143 | mutex_lock(&parent_counter->child_mutex); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4144 | list_add_tail(&child_counter->child_list, &parent_counter->child_list); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 4145 | mutex_unlock(&parent_counter->child_mutex); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4146 |  | 
|  | 4147 | return child_counter; | 
|  | 4148 | } | 
|  | 4149 |  | 
|  | 4150 | static int inherit_group(struct perf_counter *parent_counter, | 
|  | 4151 | struct task_struct *parent, | 
|  | 4152 | struct perf_counter_context *parent_ctx, | 
|  | 4153 | struct task_struct *child, | 
|  | 4154 | struct perf_counter_context *child_ctx) | 
|  | 4155 | { | 
|  | 4156 | struct perf_counter *leader; | 
|  | 4157 | struct perf_counter *sub; | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 4158 | struct perf_counter *child_ctr; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4159 |  | 
|  | 4160 | leader = inherit_counter(parent_counter, parent, parent_ctx, | 
|  | 4161 | child, NULL, child_ctx); | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 4162 | if (IS_ERR(leader)) | 
|  | 4163 | return PTR_ERR(leader); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4164 | list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { | 
| Paul Mackerras | d5d2bc0d | 2009-03-30 19:07:08 +0200 | [diff] [blame] | 4165 | child_ctr = inherit_counter(sub, parent, parent_ctx, | 
|  | 4166 | child, leader, child_ctx); | 
|  | 4167 | if (IS_ERR(child_ctr)) | 
|  | 4168 | return PTR_ERR(child_ctr); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4169 | } | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4170 | return 0; | 
|  | 4171 | } | 
|  | 4172 |  | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4173 | static void sync_child_counter(struct perf_counter *child_counter, | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 4174 | struct task_struct *child) | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4175 | { | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 4176 | struct perf_counter *parent_counter = child_counter->parent; | 
| Peter Zijlstra | 8bc2095 | 2009-05-15 20:45:59 +0200 | [diff] [blame] | 4177 | u64 child_val; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4178 |  | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 4179 | if (child_counter->attr.inherit_stat) | 
|  | 4180 | perf_counter_read_event(child_counter, child); | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 4181 |  | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4182 | child_val = atomic64_read(&child_counter->count); | 
|  | 4183 |  | 
|  | 4184 | /* | 
|  | 4185 | * Add back the child's count to the parent's count: | 
|  | 4186 | */ | 
|  | 4187 | atomic64_add(child_val, &parent_counter->count); | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 4188 | atomic64_add(child_counter->total_time_enabled, | 
|  | 4189 | &parent_counter->child_total_time_enabled); | 
|  | 4190 | atomic64_add(child_counter->total_time_running, | 
|  | 4191 | &parent_counter->child_total_time_running); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4192 |  | 
|  | 4193 | /* | 
|  | 4194 | * Remove this counter from the parent's list | 
|  | 4195 | */ | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4196 | WARN_ON_ONCE(parent_counter->ctx->parent_ctx); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 4197 | mutex_lock(&parent_counter->child_mutex); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4198 | list_del_init(&child_counter->child_list); | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 4199 | mutex_unlock(&parent_counter->child_mutex); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4200 |  | 
|  | 4201 | /* | 
|  | 4202 | * Release the parent counter, if this was the last | 
|  | 4203 | * reference to it. | 
|  | 4204 | */ | 
|  | 4205 | fput(parent_counter->filp); | 
|  | 4206 | } | 
|  | 4207 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4208 | static void | 
| Peter Zijlstra | bbbee90 | 2009-05-29 14:25:58 +0200 | [diff] [blame] | 4209 | __perf_counter_exit_task(struct perf_counter *child_counter, | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 4210 | struct perf_counter_context *child_ctx, | 
|  | 4211 | struct task_struct *child) | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4212 | { | 
|  | 4213 | struct perf_counter *parent_counter; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4214 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4215 | update_counter_times(child_counter); | 
| Peter Zijlstra | aa9c67f | 2009-05-23 18:28:59 +0200 | [diff] [blame] | 4216 | perf_counter_remove_from_context(child_counter); | 
| Ingo Molnar | 0cc0c02 | 2008-12-14 23:20:36 +0100 | [diff] [blame] | 4217 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4218 | parent_counter = child_counter->parent; | 
|  | 4219 | /* | 
|  | 4220 | * It can happen that the parent exits first, and has counters | 
|  | 4221 | * that are still around due to the child reference. These | 
|  | 4222 | * counters need to be zapped - but otherwise linger. | 
|  | 4223 | */ | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4224 | if (parent_counter) { | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 4225 | sync_child_counter(child_counter, child); | 
| Peter Zijlstra | f160095 | 2009-03-19 20:26:16 +0100 | [diff] [blame] | 4226 | free_counter(child_counter); | 
| Paul Mackerras | 4bcf349 | 2009-02-11 13:53:19 +0100 | [diff] [blame] | 4227 | } | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4228 | } | 
|  | 4229 |  | 
|  | 4230 | /* | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4231 | * When a child task exits, feed back counter values to parent counters. | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4232 | */ | 
|  | 4233 | void perf_counter_exit_task(struct task_struct *child) | 
|  | 4234 | { | 
|  | 4235 | struct perf_counter *child_counter, *tmp; | 
|  | 4236 | struct perf_counter_context *child_ctx; | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4237 | unsigned long flags; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4238 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4239 | if (likely(!child->perf_counter_ctxp)) | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4240 | return; | 
|  | 4241 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4242 | local_irq_save(flags); | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4243 | /* | 
|  | 4244 | * We can't reschedule here because interrupts are disabled, | 
|  | 4245 | * and either child is current or it is a task that can't be | 
|  | 4246 | * scheduled, so we are now safe from rescheduling changing | 
|  | 4247 | * our context. | 
|  | 4248 | */ | 
|  | 4249 | child_ctx = child->perf_counter_ctxp; | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4250 | __perf_counter_task_sched_out(child_ctx); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 4251 |  | 
|  | 4252 | /* | 
|  | 4253 | * Take the context lock here so that if find_get_context is | 
|  | 4254 | * reading child->perf_counter_ctxp, we wait until it has | 
|  | 4255 | * incremented the context's refcount before we do put_ctx below. | 
|  | 4256 | */ | 
|  | 4257 | spin_lock(&child_ctx->lock); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4258 | child->perf_counter_ctxp = NULL; | 
| Peter Zijlstra | 71a851b | 2009-07-10 09:06:56 +0200 | [diff] [blame] | 4259 | /* | 
|  | 4260 | * If this context is a clone, unclone it so it can't get | 
|  | 4261 | * swapped to another process while we're removing all | 
|  | 4262 | * the counters from it. | 
|  | 4263 | */ | 
|  | 4264 | unclone_ctx(child_ctx); | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 4265 | spin_unlock(&child_ctx->lock); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4266 | local_irq_restore(flags); | 
|  | 4267 |  | 
| Peter Zijlstra | 66fff22 | 2009-06-10 22:53:37 +0200 | [diff] [blame] | 4268 | /* | 
|  | 4269 | * We can recurse on the same lock type through: | 
|  | 4270 | * | 
|  | 4271 | *   __perf_counter_exit_task() | 
|  | 4272 | *     sync_child_counter() | 
|  | 4273 | *       fput(parent_counter->filp) | 
|  | 4274 | *         perf_release() | 
|  | 4275 | *           mutex_lock(&ctx->mutex) | 
|  | 4276 | * | 
|  | 4277 | * But since it's the parent context it won't be the same instance. | 
|  | 4278 | */ | 
|  | 4279 | mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4280 |  | 
| Peter Zijlstra | 8bc2095 | 2009-05-15 20:45:59 +0200 | [diff] [blame] | 4281 | again: | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4282 | list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, | 
|  | 4283 | list_entry) | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 4284 | __perf_counter_exit_task(child_counter, child_ctx, child); | 
| Peter Zijlstra | 8bc2095 | 2009-05-15 20:45:59 +0200 | [diff] [blame] | 4285 |  | 
|  | 4286 | /* | 
|  | 4287 | * If the last counter was a group counter, it will have appended all | 
|  | 4288 | * its siblings to the list, but we obtained 'tmp' before that, which | 
|  | 4289 | * will still point to the list head terminating the iteration. | 
|  | 4290 | */ | 
|  | 4291 | if (!list_empty(&child_ctx->counter_list)) | 
|  | 4292 | goto again; | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4293 |  | 
|  | 4294 | mutex_unlock(&child_ctx->mutex); | 
|  | 4295 |  | 
|  | 4296 | put_ctx(child_ctx); | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4297 | } | 
|  | 4298 |  | 
|  | 4299 | /* | 
| Peter Zijlstra | bbbee90 | 2009-05-29 14:25:58 +0200 | [diff] [blame] | 4300 | * free an unexposed, unused context as created by inheritance by | 
|  | 4301 | * perf_counter_init_task() below, used by fork() in case of failure. | 
|  | 4302 | */ | 
|  | 4303 | void perf_counter_free_task(struct task_struct *task) | 
|  | 4304 | { | 
|  | 4305 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | 
|  | 4306 | struct perf_counter *counter, *tmp; | 
|  | 4307 |  | 
|  | 4308 | if (!ctx) | 
|  | 4309 | return; | 
|  | 4310 |  | 
|  | 4311 | mutex_lock(&ctx->mutex); | 
|  | 4312 | again: | 
|  | 4313 | list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { | 
|  | 4314 | struct perf_counter *parent = counter->parent; | 
|  | 4315 |  | 
|  | 4316 | if (WARN_ON_ONCE(!parent)) | 
|  | 4317 | continue; | 
|  | 4318 |  | 
|  | 4319 | mutex_lock(&parent->child_mutex); | 
|  | 4320 | list_del_init(&counter->child_list); | 
|  | 4321 | mutex_unlock(&parent->child_mutex); | 
|  | 4322 |  | 
|  | 4323 | fput(parent->filp); | 
|  | 4324 |  | 
|  | 4325 | list_del_counter(counter, ctx); | 
|  | 4326 | free_counter(counter); | 
|  | 4327 | } | 
|  | 4328 |  | 
|  | 4329 | if (!list_empty(&ctx->counter_list)) | 
|  | 4330 | goto again; | 
|  | 4331 |  | 
|  | 4332 | mutex_unlock(&ctx->mutex); | 
|  | 4333 |  | 
|  | 4334 | put_ctx(ctx); | 
|  | 4335 | } | 
|  | 4336 |  | 
|  | 4337 | /* | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4338 | * Initialize the perf_counter context in task_struct | 
|  | 4339 | */ | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 4340 | int perf_counter_init_task(struct task_struct *child) | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4341 | { | 
|  | 4342 | struct perf_counter_context *child_ctx, *parent_ctx; | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4343 | struct perf_counter_context *cloned_ctx; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4344 | struct perf_counter *counter; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4345 | struct task_struct *parent = current; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4346 | int inherited_all = 1; | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 4347 | int ret = 0; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4348 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4349 | child->perf_counter_ctxp = NULL; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4350 |  | 
| Peter Zijlstra | 082ff5a | 2009-05-23 18:29:00 +0200 | [diff] [blame] | 4351 | mutex_init(&child->perf_counter_mutex); | 
|  | 4352 | INIT_LIST_HEAD(&child->perf_counter_list); | 
|  | 4353 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4354 | if (likely(!parent->perf_counter_ctxp)) | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 4355 | return 0; | 
|  | 4356 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4357 | /* | 
|  | 4358 | * This is executed from the parent task context, so inherit | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4359 | * counters that have been marked for cloning. | 
|  | 4360 | * First allocate and initialize a context for the child. | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4361 | */ | 
|  | 4362 |  | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4363 | child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); | 
|  | 4364 | if (!child_ctx) | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 4365 | return -ENOMEM; | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4366 |  | 
|  | 4367 | __perf_counter_init_context(child_ctx, child); | 
|  | 4368 | child->perf_counter_ctxp = child_ctx; | 
| Paul Mackerras | c93f766 | 2009-05-28 22:18:17 +1000 | [diff] [blame] | 4369 | get_task_struct(child); | 
| Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 4370 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4371 | /* | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 4372 | * If the parent's context is a clone, pin it so it won't get | 
|  | 4373 | * swapped under us. | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4374 | */ | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 4375 | parent_ctx = perf_pin_task_context(parent); | 
|  | 4376 |  | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4377 | /* | 
|  | 4378 | * No need to check if parent_ctx != NULL here; since we saw | 
|  | 4379 | * it non-NULL earlier, the only reason for it to become NULL | 
|  | 4380 | * is if we exit, and since we're currently in the middle of | 
|  | 4381 | * a fork we can't be exiting at the same time. | 
|  | 4382 | */ | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4383 |  | 
|  | 4384 | /* | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4385 | * Lock the parent list. No need to lock the child - not PID | 
|  | 4386 | * hashed yet and not running, so nobody can access it. | 
|  | 4387 | */ | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4388 | mutex_lock(&parent_ctx->mutex); | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4389 |  | 
|  | 4390 | /* | 
|  | 4391 | * We don't have to disable NMIs - we are only looking at | 
|  | 4392 | * the list, not manipulating it: | 
|  | 4393 | */ | 
| Peter Zijlstra | d7b629a | 2009-05-20 12:21:19 +0200 | [diff] [blame] | 4394 | list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { | 
|  | 4395 | if (counter != counter->group_leader) | 
|  | 4396 | continue; | 
|  | 4397 |  | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 4398 | if (!counter->attr.inherit) { | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4399 | inherited_all = 0; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4400 | continue; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4401 | } | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4402 |  | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 4403 | ret = inherit_group(counter, parent, parent_ctx, | 
|  | 4404 | child, child_ctx); | 
|  | 4405 | if (ret) { | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4406 | inherited_all = 0; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4407 | break; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4408 | } | 
|  | 4409 | } | 
|  | 4410 |  | 
|  | 4411 | if (inherited_all) { | 
|  | 4412 | /* | 
|  | 4413 | * Mark the child context as a clone of the parent | 
|  | 4414 | * context, or of whatever the parent is a clone of. | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4415 | * Note that if the parent is a clone, it could get | 
|  | 4416 | * uncloned at any point, but that doesn't matter | 
|  | 4417 | * because the list of counters and the generation | 
|  | 4418 | * count can't have changed since we took the mutex. | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4419 | */ | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4420 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); | 
|  | 4421 | if (cloned_ctx) { | 
|  | 4422 | child_ctx->parent_ctx = cloned_ctx; | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 4423 | child_ctx->parent_gen = parent_ctx->parent_gen; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 4424 | } else { | 
|  | 4425 | child_ctx->parent_ctx = parent_ctx; | 
|  | 4426 | child_ctx->parent_gen = parent_ctx->generation; | 
|  | 4427 | } | 
|  | 4428 | get_ctx(child_ctx->parent_ctx); | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4429 | } | 
|  | 4430 |  | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4431 | mutex_unlock(&parent_ctx->mutex); | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 4432 |  | 
| Paul Mackerras | 25346b9 | 2009-06-01 17:48:12 +1000 | [diff] [blame] | 4433 | perf_unpin_context(parent_ctx); | 
| Paul Mackerras | ad3a37d | 2009-05-29 16:06:20 +1000 | [diff] [blame] | 4434 |  | 
| Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 4435 | return ret; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 4436 | } | 
|  | 4437 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4438 | static void __cpuinit perf_counter_init_cpu(int cpu) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4439 | { | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4440 | struct perf_cpu_context *cpuctx; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4441 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4442 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 
|  | 4443 | __perf_counter_init_context(&cpuctx->ctx, NULL); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4444 |  | 
| Ingo Molnar | 1dce8d9 | 2009-05-04 19:23:18 +0200 | [diff] [blame] | 4445 | spin_lock(&perf_resource_lock); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4446 | cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; | 
| Ingo Molnar | 1dce8d9 | 2009-05-04 19:23:18 +0200 | [diff] [blame] | 4447 | spin_unlock(&perf_resource_lock); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4448 |  | 
| Paul Mackerras | 01d0287 | 2009-01-14 13:44:19 +1100 | [diff] [blame] | 4449 | hw_perf_counter_setup(cpu); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4450 | } | 
|  | 4451 |  | 
|  | 4452 | #ifdef CONFIG_HOTPLUG_CPU | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4453 | static void __perf_counter_exit_cpu(void *info) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4454 | { | 
|  | 4455 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 
|  | 4456 | struct perf_counter_context *ctx = &cpuctx->ctx; | 
|  | 4457 | struct perf_counter *counter, *tmp; | 
|  | 4458 |  | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4459 | list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) | 
|  | 4460 | __perf_counter_remove_from_context(counter); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4461 | } | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4462 | static void perf_counter_exit_cpu(int cpu) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4463 | { | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4464 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | 
|  | 4465 | struct perf_counter_context *ctx = &cpuctx->ctx; | 
|  | 4466 |  | 
|  | 4467 | mutex_lock(&ctx->mutex); | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4468 | smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 4469 | mutex_unlock(&ctx->mutex); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4470 | } | 
|  | 4471 | #else | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4472 | static inline void perf_counter_exit_cpu(int cpu) { } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4473 | #endif | 
|  | 4474 |  | 
|  | 4475 | static int __cpuinit | 
|  | 4476 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | 
|  | 4477 | { | 
|  | 4478 | unsigned int cpu = (long)hcpu; | 
|  | 4479 |  | 
|  | 4480 | switch (action) { | 
|  | 4481 |  | 
|  | 4482 | case CPU_UP_PREPARE: | 
|  | 4483 | case CPU_UP_PREPARE_FROZEN: | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4484 | perf_counter_init_cpu(cpu); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4485 | break; | 
|  | 4486 |  | 
|  | 4487 | case CPU_DOWN_PREPARE: | 
|  | 4488 | case CPU_DOWN_PREPARE_FROZEN: | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 4489 | perf_counter_exit_cpu(cpu); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4490 | break; | 
|  | 4491 |  | 
|  | 4492 | default: | 
|  | 4493 | break; | 
|  | 4494 | } | 
|  | 4495 |  | 
|  | 4496 | return NOTIFY_OK; | 
|  | 4497 | } | 
|  | 4498 |  | 
| Paul Mackerras | f38b082 | 2009-06-02 21:05:16 +1000 | [diff] [blame] | 4499 | /* | 
|  | 4500 | * This has to have a higher priority than migration_notifier in sched.c. | 
|  | 4501 | */ | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4502 | static struct notifier_block __cpuinitdata perf_cpu_nb = { | 
|  | 4503 | .notifier_call		= perf_cpu_notify, | 
| Paul Mackerras | f38b082 | 2009-06-02 21:05:16 +1000 | [diff] [blame] | 4504 | .priority		= 20, | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4505 | }; | 
|  | 4506 |  | 
| Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 4507 | void __init perf_counter_init(void) | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4508 | { | 
|  | 4509 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | 
|  | 4510 | (void *)(long)smp_processor_id()); | 
|  | 4511 | register_cpu_notifier(&perf_cpu_nb); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4512 | } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4513 |  | 
|  | 4514 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) | 
|  | 4515 | { | 
|  | 4516 | return sprintf(buf, "%d\n", perf_reserved_percpu); | 
|  | 4517 | } | 
|  | 4518 |  | 
|  | 4519 | static ssize_t | 
|  | 4520 | perf_set_reserve_percpu(struct sysdev_class *class, | 
|  | 4521 | const char *buf, | 
|  | 4522 | size_t count) | 
|  | 4523 | { | 
|  | 4524 | struct perf_cpu_context *cpuctx; | 
|  | 4525 | unsigned long val; | 
|  | 4526 | int err, cpu, mpt; | 
|  | 4527 |  | 
|  | 4528 | err = strict_strtoul(buf, 10, &val); | 
|  | 4529 | if (err) | 
|  | 4530 | return err; | 
|  | 4531 | if (val > perf_max_counters) | 
|  | 4532 | return -EINVAL; | 
|  | 4533 |  | 
| Ingo Molnar | 1dce8d9 | 2009-05-04 19:23:18 +0200 | [diff] [blame] | 4534 | spin_lock(&perf_resource_lock); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4535 | perf_reserved_percpu = val; | 
|  | 4536 | for_each_online_cpu(cpu) { | 
|  | 4537 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 
|  | 4538 | spin_lock_irq(&cpuctx->ctx.lock); | 
|  | 4539 | mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, | 
|  | 4540 | perf_max_counters - perf_reserved_percpu); | 
|  | 4541 | cpuctx->max_pertask = mpt; | 
|  | 4542 | spin_unlock_irq(&cpuctx->ctx.lock); | 
|  | 4543 | } | 
| Ingo Molnar | 1dce8d9 | 2009-05-04 19:23:18 +0200 | [diff] [blame] | 4544 | spin_unlock(&perf_resource_lock); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4545 |  | 
|  | 4546 | return count; | 
|  | 4547 | } | 
|  | 4548 |  | 
|  | 4549 | static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) | 
|  | 4550 | { | 
|  | 4551 | return sprintf(buf, "%d\n", perf_overcommit); | 
|  | 4552 | } | 
|  | 4553 |  | 
|  | 4554 | static ssize_t | 
|  | 4555 | perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) | 
|  | 4556 | { | 
|  | 4557 | unsigned long val; | 
|  | 4558 | int err; | 
|  | 4559 |  | 
|  | 4560 | err = strict_strtoul(buf, 10, &val); | 
|  | 4561 | if (err) | 
|  | 4562 | return err; | 
|  | 4563 | if (val > 1) | 
|  | 4564 | return -EINVAL; | 
|  | 4565 |  | 
| Ingo Molnar | 1dce8d9 | 2009-05-04 19:23:18 +0200 | [diff] [blame] | 4566 | spin_lock(&perf_resource_lock); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4567 | perf_overcommit = val; | 
| Ingo Molnar | 1dce8d9 | 2009-05-04 19:23:18 +0200 | [diff] [blame] | 4568 | spin_unlock(&perf_resource_lock); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 4569 |  | 
|  | 4570 | return count; | 
|  | 4571 | } | 
|  | 4572 |  | 
|  | 4573 | static SYSDEV_CLASS_ATTR( | 
|  | 4574 | reserve_percpu, | 
|  | 4575 | 0644, | 
|  | 4576 | perf_show_reserve_percpu, | 
|  | 4577 | perf_set_reserve_percpu | 
|  | 4578 | ); | 
|  | 4579 |  | 
|  | 4580 | static SYSDEV_CLASS_ATTR( | 
|  | 4581 | overcommit, | 
|  | 4582 | 0644, | 
|  | 4583 | perf_show_overcommit, | 
|  | 4584 | perf_set_overcommit | 
|  | 4585 | ); | 
|  | 4586 |  | 
|  | 4587 | static struct attribute *perfclass_attrs[] = { | 
|  | 4588 | &attr_reserve_percpu.attr, | 
|  | 4589 | &attr_overcommit.attr, | 
|  | 4590 | NULL | 
|  | 4591 | }; | 
|  | 4592 |  | 
|  | 4593 | static struct attribute_group perfclass_attr_group = { | 
|  | 4594 | .attrs			= perfclass_attrs, | 
|  | 4595 | .name			= "perf_counters", | 
|  | 4596 | }; | 
|  | 4597 |  | 
|  | 4598 | static int __init perf_counter_sysfs_init(void) | 
|  | 4599 | { | 
|  | 4600 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, | 
|  | 4601 | &perfclass_attr_group); | 
|  | 4602 | } | 
|  | 4603 | device_initcall(perf_counter_sysfs_init); |
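The two class attributes registered above surface as writable files under the cpu sysdev class; with the group name used here they would normally appear as /sys/devices/system/cpu/perf_counters/reserve_percpu and /sys/devices/system/cpu/perf_counters/overcommit (the exact path is an assumption about the sysfs layout of this era). A minimal sketch of driving the reservation knob from userspace:

	/* Hypothetical userspace sketch -- the sysfs path is assumed, not part of this file. */
	#include <stdio.h>

	static int set_reserve_percpu(int nr)
	{
		FILE *f = fopen("/sys/devices/system/cpu/perf_counters/reserve_percpu", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", nr);	/* parsed by perf_set_reserve_percpu() above */
		return fclose(f);
	}

Values above perf_max_counters are rejected with -EINVAL by the store routine, and each online CPU's max_pertask is recomputed under perf_resource_lock, as perf_set_reserve_percpu() above shows.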