/*
 * Performance counters:
 *
 *  Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,
	PERF_COUNT_BUS_CYCLES		=  6,

	PERF_HW_EVENTS_MAX		=  7,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
	PERF_COUNT_CPU_MIGRATIONS	= -5,
	PERF_COUNT_PAGE_FAULTS_MIN	= -6,
	PERF_COUNT_PAGE_FAULTS_MAJ	= -7,

	PERF_SW_EVENTS_MIN		= -8,
};

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE	= 0,
	PERF_RECORD_IRQ		= 1,
	PERF_RECORD_GROUP	= 2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	__s64			type;

	__u64			irq_period;
	__u64			record_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				nmi	       :  1, /* NMI sampling          */
				raw	       :  1, /* raw event type        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */

				__reserved_1   : 54;

	__u32			extra_config_len;
	__u32			__reserved_4;

	__u64			__reserved_2;
	__u64			__reserved_3;
};
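
/*
 * Illustrative sketch only (not part of this header): a user-space
 * caller would fill in a perf_counter_hw_event and hand it to
 * sys_perf_counter_open().  The wrapper and its (hw_event, pid, cpu,
 * group_fd, flags) argument order are assumptions, not defined here:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_INSTRUCTIONS,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *		.disabled	= 1,
 *	};
 *	int fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
 *
 * Starting with .disabled set keeps the counter off until it is enabled
 * via the ioctl below.  A software counter would use one of the negative
 * PERF_COUNT_* values (e.g. PERF_COUNT_TASK_CLOCK); setting .raw instead
 * means .type carries a raw hardware event code.
 */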

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)
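
/*
 * For example (illustrative; fd is a counter file descriptor returned
 * by sys_perf_counter_open()):
 *
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
 *	...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
 */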

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <asm/atomic.h>

struct task_struct;

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	counter_base;
			int		nmi;
			unsigned int	idx;
		};
		union { /* software */
			atomic64_t	count;
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t		prev_count;
	u64			irq_period;
	atomic64_t		period_left;
#endif
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling buffer
 */
struct perf_data {
	int			len;
	int			rd_idx;
	int			overrun;
	u8			data[PERF_DATA_BUFLEN];
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int  (*enable)	(struct perf_counter *counter);
	void (*disable)	(struct perf_counter *counter);
	void (*read)	(struct perf_counter *counter);
};
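
/*
 * A minimal sketch of how these ops might be provided (all names below
 * are illustrative, not part of this header); hw_perf_counter_init()
 * is expected to hand back a pointer to such an ops structure for a
 * counter it accepts:
 *
 *	static int cycles_enable(struct perf_counter *counter)
 *	{
 *		return 0;
 *	}
 *	static void cycles_disable(struct perf_counter *counter) { }
 *	static void cycles_read(struct perf_counter *counter) { }
 *
 *	static const struct hw_perf_counter_ops cycles_counter_ops = {
 *		.enable		= cycles_enable,
 *		.disable	= cycles_disable,
 *		.read		= cycles_read,
 *	};
 */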

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;
	atomic64_t			count;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	struct list_head		child_list;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	struct task_struct	*task;
#endif
};
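
/*
 * The locking rule above, as an illustrative sketch (the snippet only
 * shows the lock pairing for modifying counter_list; it is not lifted
 * from the implementation):
 *
 *	mutex_lock(&ctx->mutex);
 *	spin_lock_irq(&ctx->lock);
 *	list_add_tail(&counter->list_entry, &ctx->counter_list);
 *	ctx->nr_counters++;
 *	spin_unlock_irq(&ctx->lock);
 *	mutex_unlock(&ctx->mutex);
 */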

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
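
/*
 * A sketch of how the save/disable and restore pair above is intended
 * to be used (the body in between is illustrative only):
 *
 *	u64 perf_flags = hw_perf_save_disable();
 *	...
 *	hw_perf_restore(perf_flags);
 */
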
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !counter->hw_event.raw && counter->hw_event.type < 0;
}

extern void perf_swcounter_event(enum hw_event_types, u64, int, struct pt_regs *);
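
/*
 * Illustrative call site (hypothetical; no such call is defined in this
 * header) for reporting a software event from kernel code.  The
 * arguments are the event type, the number of events, an NMI-context
 * flag and the interrupted registers:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
 */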

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_counter_unthrottle(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void perf_swcounter_event(enum hw_event_types event, u64 nr,
					int nmi, struct pt_regs *regs)	{ }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */