/*
 *  Performance counters:
 *
 *   Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright (C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *   Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * hw_event.type
 */
enum perf_event_types {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,

	/*
	 * available TYPE space; raw is the max value.
	 */

	PERF_TYPE_RAW			= 128,
};

/*
 * Generalized performance counter event types, used by the hw_event.event_id
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_ids {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,
};

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and software events of the kernel (and allow profiling them as
 * well):
 */
enum sw_event_ids {
	PERF_COUNT_CPU_CLOCK		= 0,
	PERF_COUNT_TASK_CLOCK		= 1,
	PERF_COUNT_PAGE_FAULTS		= 2,
	PERF_COUNT_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_CPU_MIGRATIONS	= 4,
	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,

	PERF_SW_EVENTS_MAX		= 7,
};

#define __PERF_COUNTER_MASK(name)			\
	(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<	\
	 PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS		1
#define PERF_COUNTER_RAW_SHIFT		63
#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS	63
#define PERF_COUNTER_CONFIG_SHIFT	0
#define PERF_COUNTER_CONFIG_MASK	__PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS		7
#define PERF_COUNTER_TYPE_SHIFT		56
#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS		56
#define PERF_COUNTER_EVENT_SHIFT	0
#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)

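/*
 * Illustrative sketch, not part of the ABI: user-space could compose a
 * config word for a generalized event from the fields above, e.g. for
 * counting CPU cycles:
 *
 *	__u64 config = ((__u64)PERF_TYPE_HARDWARE << PERF_COUNTER_TYPE_SHIFT) |
 *		       (PERF_COUNT_CPU_CYCLES & PERF_COUNTER_EVENT_MASK);
 *
 * Leaving the RAW bit (bit 63) clear selects this type/event encoding;
 * setting it marks the low 63 bits as raw, CPU-specific configuration.
 */
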
/*
 * Bits that can be set in hw_event.record_type to request information
 * in the overflow packets.
 */
enum perf_counter_record_format {
	PERF_RECORD_IP		= 1U << 0,
	PERF_RECORD_TID		= 1U << 1,
	PERF_RECORD_TIME	= 1U << 2,
	PERF_RECORD_ADDR	= 1U << 3,
	PERF_RECORD_GROUP	= 1U << 4,
	PERF_RECORD_CALLCHAIN	= 1U << 5,
	PERF_RECORD_CONFIG	= 1U << 6,
};

/*
 * Bits that can be set in hw_event.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 2,
};

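/*
 * A sketch, not normative: with both format bits set, a read() on the
 * counter fd would return three u64 values, the counter value followed
 * by the two times in increasing bit order:
 *
 *	struct {
 *		__u64	count;		// counter value
 *		__u64	time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		__u64	time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *	} value;
 *
 *	read(fd, &value, sizeof(value));
 */
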
/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	/*
	 * The MSB of the config word signifies whether the rest contains
	 * CPU-specific (raw) counter configuration data; if unset, the
	 * next 7 bits are an event type and the remaining 56 bits are the
	 * event identifier.
	 */
	__u64			config;

	__u64			irq_period;
	__u32			record_type;
	__u32			read_format;

	__u64			disabled       :  1, /* off by default        */
				nmi	       :  1, /* NMI sampling          */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap	       :  1, /* include mmap data     */
				munmap	       :  1, /* include munmap data   */
				comm	       :  1, /* include comm data     */

				__reserved_1   : 52;

	__u32			extra_config_len;
	__u32			wakeup_events;	/* wakeup every n events */

	__u64			__reserved_2;
	__u64			__reserved_3;
};

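/*
 * Illustrative only: a minimal initialization, counting user-space
 * instructions (PERF_TYPE_HARDWARE is 0, so the plain event id suffices);
 * the counter fd would come from sys_perf_counter_open():
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.config		= PERF_COUNT_INSTRUCTIONS,
 *		.disabled	= 1,	// enable later via ioctl
 *		.exclude_kernel	= 1,	// count user mode only
 *	};
 */
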
/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IOW('$', 0, u32)
#define PERF_COUNTER_IOC_DISABLE	_IOW('$', 1, u32)
#define PERF_COUNTER_IOC_REFRESH	_IOW('$', 2, u32)
#define PERF_COUNTER_IOC_RESET		_IOW('$', 3, u32)

enum perf_counter_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};

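/*
 * Usage sketch (fd obtained from sys_perf_counter_open()); passing
 * PERF_IOC_FLAG_GROUP as the argument applies the operation to the
 * counter's whole group:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */
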
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier();
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space should issue an rmb(), on SMP-capable platforms,
	 * after reading this value -- see perf_counter_wakeup().
	 */
	__u32	data_head;		/* head in the data section */
};

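/*
 * Mapping sketch (illustrative; page_size is assumed to come from
 * sysconf(_SC_PAGESIZE)): the first page is this control page, the
 * following 2^n pages form the data buffer:
 *
 *	struct perf_counter_mmap_page *pc;
 *
 *	pc = mmap(NULL, (1 + 8) * page_size, PROT_READ,
 *		  MAP_SHARED, fd, 0);
 */
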
#define PERF_EVENT_MISC_KERNEL		(1 << 0)
#define PERF_EVENT_MISC_USER		(1 << 1)
#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,
	PERF_EVENT_MUNMAP		= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_EVENT_COMM			= 3,

	/*
	 * When header.misc & PERF_EVENT_MISC_OVERFLOW is set, the event_type
	 * field will be a PERF_RECORD_* flag combination:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_RECORD_IP
	 *	{ u32			pid, tid; } && PERF_RECORD_TID
	 *	{ u64			time;	  } && PERF_RECORD_TIME
	 *	{ u64			addr;	  } && PERF_RECORD_ADDR
	 *	{ u64			config;	  } && PERF_RECORD_CONFIG
	 *
	 *	{ u64			nr;
	 *	  { u64 event, val; }	cnt[nr];  } && PERF_RECORD_GROUP
	 *
	 *	{ u16			nr,
	 *				hv,
	 *				kernel,
	 *				user;
	 *	  u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
	 * };
	 */
};

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <asm/atomic.h>

struct task_struct;

static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
{
	return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
		PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_EVENT_MASK;
}

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64				config;
			unsigned long			config_base;
			unsigned long			counter_base;
			int				nmi;
			int				idx;
		};
		union { /* software */
			atomic64_t			count;
			struct hrtimer			hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				irq_period;
	atomic64_t			period_left;
#endif
};

struct perf_counter;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)			(struct perf_counter *counter);
	void (*disable)			(struct perf_counter *counter);
	void (*read)			(struct perf_counter *counter);
};

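/*
 * Shape sketch only (all names hypothetical): an architecture backend
 * fills one of these in and hands it back from hw_perf_counter_init():
 *
 *	static const struct pmu my_pmu = {
 *		.enable		= my_pmu_enable,	// start counting
 *		.disable	= my_pmu_disable,	// stop counting
 *		.read		= my_pmu_read,		// update counter->count
 *	};
 */
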
/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;	/* nr of data pages  */
	int				nr_locked;	/* nr pages mlocked  */

	atomic_t			poll;		/* POLL_ for wakeups */
	atomic_t			head;		/* write position    */
	atomic_t			events;		/* event limit       */

	atomic_t			done_head;	/* completed head    */
	atomic_t			lock;		/* concurrent writes */

	atomic_t			wakeup;		/* needs a wakeup    */

	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	const struct pmu		*pmu;

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	struct list_head		child_list;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	struct task_struct	*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64			time;
	u64			timestamp;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

#ifdef CONFIG_PERF_COUNTERS

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern int perf_counter_overflow(struct perf_counter *counter,
				 int nmi, struct pt_regs *regs, u64 addr);
/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !perf_event_raw(&counter->hw_event) &&
		perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

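/*
 * Call sketch, mirroring the declaration above (arguments are event id,
 * count, nmi flag, regs, addr); a hypothetical context-switch hook might
 * do:
 *
 *	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 0, regs, 0);
 */
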
extern void perf_counter_mmap(unsigned long addr, unsigned long len,
			      unsigned long pgoff, struct file *file);

extern void perf_counter_munmap(unsigned long addr, unsigned long len,
				unsigned long pgoff, struct file *file);

extern void perf_counter_comm(struct task_struct *tsk);

#define MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	u16	nr, hv, kernel, user;
	u64	ip[MAX_STACK_DEPTH];
};

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_counter_priv;
extern int sysctl_perf_counter_mlock;

extern void perf_counter_init(void);

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_counter_unthrottle(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }

static inline void
perf_counter_mmap(unsigned long addr, unsigned long len,
		  unsigned long pgoff, struct file *file)		{ }

static inline void
perf_counter_munmap(unsigned long addr, unsigned long len,
		    unsigned long pgoff, struct file *file)		{ }

static inline void perf_counter_comm(struct task_struct *tsk)		{ }
static inline void perf_counter_init(void)				{ }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */