/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
	PERF_COUNT_CPU_MIGRATIONS	= -5,

	PERF_SW_EVENTS_MIN		= -6,
};
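
/*
 * Illustrative note (a sketch, not part of the original interface
 * docs): positive hw_event.type values select the generalized hardware
 * events above, while negative values select the kernel-provided
 * software counters, which work even without PMU hardware:
 *
 *	hw_event.type = PERF_COUNT_CPU_CYCLES;	hardware counter
 *	hw_event.type = PERF_COUNT_TASK_CLOCK;	software counter
 *
 * With the hw_event.raw bit set (see struct perf_counter_hw_event
 * below), .type instead carries a raw, CPU-model-specific event code.
 */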

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE	= 0,
	PERF_RECORD_IRQ		= 1,
	PERF_RECORD_GROUP	= 2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	s64			type;

	u64			irq_period;
	u32			record_type;

	u32			disabled     :  1, /* off by default      */
				nmi	     :  1, /* NMI sampling        */
				raw	     :  1, /* raw event type      */
				inherit	     :  1, /* children inherit it */
				__reserved_1 : 28;

	u64			__reserved_2;
};
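
/*
 * Example usage (a user-space sketch under stated assumptions, not a
 * definitive interface): open a generalized instruction counter on the
 * current task and read its 64-bit value. This assumes the syscall
 * takes (hw_event, pid, cpu, group_fd) as in this patch series, with
 * pid 0 meaning the current task, cpu -1 meaning any CPU and group_fd
 * -1 starting a new counter group; __NR_perf_counter_open is
 * architecture-specific:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_INSTRUCTIONS,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *	};
 *	int fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
 *	u64 count;
 *
 *	if (fd >= 0 && read(fd, &count, sizeof(count)) == sizeof(count))
 *		printf("instructions: %llu\n", (unsigned long long)count);
 */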

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	u64			config;
	unsigned long		config_base;
	unsigned long		counter_base;
	int			nmi;
	unsigned int		idx;
	atomic64_t		prev_count;
	u64			irq_period;
	atomic64_t		period_left;
#endif
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int			len;
	int			rd_idx;
	int			overrun;
	u8			data[PERF_DATA_BUFLEN];
};
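
/*
 * Sketch of the intended flow (an illustration inferred from this
 * header, not a specification): with record_type == PERF_RECORD_IRQ,
 * counter overflow interrupts append sample data to one perf_data
 * buffer while read() drains the other; rd_idx tracks the reader and
 * overrun counts data dropped when a buffer fills:
 *
 *	u8 buf[PERF_DATA_BUFLEN];
 *	ssize_t n = read(fd, buf, sizeof(buf));	sampled records, if any
 */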

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int (*enable)		(struct perf_counter *counter);
	void (*disable)		(struct perf_counter *counter);
	void (*read)		(struct perf_counter *counter);
};
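
/*
 * Minimal sketch of an ops implementation (hypothetical names, modeled
 * on the kernel-provided software counters; not a definitive template).
 * ->enable() returns 0 on success; ->read() refreshes counter->count:
 *
 *	static void dummy_read(struct perf_counter *counter)
 *	{
 *		atomic64_set(&counter->count, 0);
 *	}
 *
 *	static const struct hw_perf_counter_ops dummy_ops = {
 *		.enable		= dummy_enable,
 *		.disable	= dummy_disable,
 *		.read		= dummy_read,
 *	};
 */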

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	atomic64_t			count;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
#endif
};
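
/*
 * Group semantics sketch (user-space, hypothetical event structs; same
 * syscall assumptions as above): a counter opened with group_fd
 * pointing at an existing counter becomes a sibling of that group
 * leader, and the group is scheduled onto the CPU as a unit:
 *
 *	int leader = syscall(__NR_perf_counter_open, &cycles, 0, -1, -1);
 *	int sibling = syscall(__NR_perf_counter_open, &insns, 0, -1, leader);
 */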

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
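
/*
 * hw_perf_save_disable() and hw_perf_restore() pair up to disable all
 * counters around a critical section, e.g. while reprogramming the
 * PMU (a minimal sketch of the calling convention):
 *
 *	u64 perf_flags = hw_perf_save_disable();
 *	... reprogram counters ...
 *	hw_perf_restore(perf_flags);
 */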

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */