/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

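/*
 * For PERF_TYPE_HW_CACHE events the three enums above are packed into
 * attr.config as id | (op_id << 8) | (op_result_id << 16). A minimal
 * user-space sketch of a helper building such a value (the helper name
 * is illustrative, not part of the ABI):
 *
 *	static __u64 hw_cache_config(enum perf_hw_cache_id id,
 *				     enum perf_hw_cache_op_id op,
 *				     enum perf_hw_cache_op_result_id res)
 *	{
 *		return (__u64)id | ((__u64)op << 8) | ((__u64)res << 16);
 *	}
 *
 * e.g. L1 data cache read misses:
 *
 *	attr.config = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
 *				      PERF_COUNT_HW_CACHE_OP_READ,
 *				      PERF_COUNT_HW_CACHE_RESULT_MISS);
 */
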
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling of
 * them as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,

	PERF_SAMPLE_MAX				= 1U << 14, /* non-ABI */
};

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */

	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

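/*
 * Usage sketch: to record the branch stack, set PERF_SAMPLE_BRANCH_STACK
 * in attr.sample_type and pick branch types here, e.g. user-space call
 * branches only:
 *
 *	attr.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type	= PERF_SAMPLE_BRANCH_USER |
 *				  PERF_SAMPLE_BRANCH_ANY_CALL;
 *
 * If no PLM bit is set, the kernel applies the event's own privilege
 * level, as described above.
 */
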
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX				= 1U << 4, /* non-ABI */
};

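/*
 * Example sketch: read and scale a single counter opened with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *		 PERF_FORMAT_TOTAL_TIME_RUNNING
 * (the struct and function names are illustrative):
 *
 *	struct single_read {
 *		__u64 value;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *	};
 *
 *	static double read_scaled(int fd)
 *	{
 *		struct single_read rf;
 *
 *		if (read(fd, &rf, sizeof(rf)) != sizeof(rf) ||
 *		    !rf.time_running)
 *			return 0.0;
 *		// compensate for time the event was multiplexed off the PMU
 *		return (double)rf.value * rf.time_enabled / rf.time_running;
 *	}
 */
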
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	88	/* add: sample_regs_user */
#define PERF_ATTR_SIZE_VER4	96	/* add: sample_stack_user */

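/*
 * The size field is what makes these versioned sizes work across kernels.
 * A common user-space pattern (a sketch; perf_event_open() stands for the
 * raw syscall wrapper shown after struct perf_event_attr below) is:
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr); // e.g. PERF_ATTR_SIZE_VER4 with this header
 *	fd = perf_event_open(&attr, 0, -1, -1, 0);
 *	if (fd < 0 && errno == E2BIG) {
 *		// older kernel: it wrote the size it supports back into
 *		// attr.size, so the caller can retry with that size.
 *	}
 */
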
/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */

				__reserved_1   : 41;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	/* Align to u64. */
	__u32	__reserved_2;
};

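/*
 * Usage sketch: there is no glibc wrapper for the syscall, so user space
 * typically supplies its own. A minimal self-monitoring example counting
 * its own instructions (error handling elided):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
 *				   int cpu, int group_fd, unsigned long flags)
 *	{
 *		return syscall(__NR_perf_event_open, attr, pid, cpu,
 *			       group_fd, flags);
 *	}
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *		long long count;
 *		int fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type	    = PERF_TYPE_HARDWARE;
 *		attr.size	    = sizeof(attr);
 *		attr.config	    = PERF_COUNT_HW_INSTRUCTIONS;
 *		attr.exclude_kernel = 1;
 *		attr.exclude_hv	    = 1;
 *
 *		fd = perf_event_open(&attr, 0, -1, -1, 0); // this task, any CPU
 *		// ... workload ...
 *		read(fd, &count, sizeof(count));
 *		close(fd);
 *		return 0;
 *	}
 */
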
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};

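/*
 * With these ioctls a counter can be gated around a region of interest
 * (sketch, reusing the fd from the example above; opening with
 * attr.disabled = 1 keeps it off until the first enable):
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);	// zero the counter
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	// start counting
 *	// ... region of interest ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);	// stop counting
 *
 * Passing PERF_IOC_FLAG_GROUP as the argument applies the operation to
 * the whole event group instead of a single event.
 */
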
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;	 /* version number of this structure */
	__u32	compat_version;	 /* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, idx, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     idx = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_usr_rdpmc && idx) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(idx - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;		 /* seqlock for synchronization */
	__u32	index;		 /* hardware event identifier */
	__s64	offset;		 /* add to hardware event value */
	__u64	time_enabled;	 /* time event active */
	__u64	time_running;	 /* time event on cpu */
	union {
		__u64	capabilities;
		__u64	cap_usr_time  : 1,
			cap_usr_rdpmc : 1,
			cap_____res   : 62;
	};

	/*
	 * If cap_usr_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & ((1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if idx), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (idx)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem  = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;

	/*
	 * Hole for extension of the self-monitoring capabilities
	 */

	__u64	__reserved[120]; /* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64	data_head;	/* head in the data section */
	__u64	data_tail;	/* user-space written tail */
};

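/*
 * Sketch of mapping this control page plus a 2^n-page data area and
 * consuming data_head. __sync_synchronize() stands in for the rmb()
 * required above; helper names are illustrative:
 *
 *	// map the control page + 2^n pages of data (here n = 3)
 *	static struct perf_event_mmap_page *map_ring(int fd, size_t *data_size)
 *	{
 *		size_t page = sysconf(_SC_PAGESIZE);
 *
 *		*data_size = 8 * page;
 *		return mmap(NULL, page + *data_size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *	}
 *
 *	// consume everything currently in the buffer
 *	static void drain(struct perf_event_mmap_page *pc, size_t data_size)
 *	{
 *		__u64 head = pc->data_head;
 *
 *		__sync_synchronize();	// rmb(): order head read vs. data
 *		// records live at ((char *)pc + page_size), in the byte
 *		// range [data_tail, head) modulo data_size; parse here ...
 *		pc->data_tail = head;	// tell the kernel we are done
 *	}
 */
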
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
	 * as described in PERF_RECORD_SAMPLE below. These fields are stashed
	 * just after the perf_event_header and the fields already present
	 * for the existing record, i.e. at the end of the payload. That way
	 * a newer perf.data file will be supported by older perf tools, with
	 * these new optional fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises with respect
	 *	# to the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size;   } && PERF_SAMPLE_STACK_USER
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};

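/*
 * Consumption sketch: records are parsed by walking perf_event_header
 * entries through the mmap data area (wrap-around handling elided; the
 * sample layout must mirror the attr.sample_type bits chosen at open
 * time, here PERF_SAMPLE_IP | PERF_SAMPLE_TID):
 *
 *	static void handle(struct perf_event_header *hdr)
 *	{
 *		switch (hdr->type) {
 *		case PERF_RECORD_SAMPLE: {
 *			__u64 ip  = *(__u64 *)(hdr + 1);
 *			__u32 pid = *(__u32 *)((char *)(hdr + 1) + 8);
 *			// ...
 *			break;
 *		}
 *		default:
 *			break;
 *		}
 *		// the next record starts hdr->size bytes further on
 *	}
 */
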
#define PERF_MAX_STACK_DEPTH		127

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

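/*
 * These values are sentinels interleaved with real addresses in a
 * PERF_SAMPLE_CALLCHAIN ips[] array. A sketch of splitting a callchain
 * by context (all sentinels compare >= PERF_CONTEXT_MAX as u64):
 *
 *	static void walk_callchain(const __u64 *ips, __u64 nr)
 *	{
 *		__u64 context = PERF_CONTEXT_MAX;
 *		__u64 i;
 *
 *		for (i = 0; i < nr; i++) {
 *			if (ips[i] >= PERF_CONTEXT_MAX) {
 *				context = ips[i]; // e.g. PERF_CONTEXT_KERNEL
 *				continue;
 *			}
 *			// ips[i] is a real IP belonging to 'context'
 *		}
 *	}
 */
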
#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)
#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int		(*is_in_guest)(void);
	int		(*is_user_mode)(void);
	unsigned long	(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional. In case it
 * is not supported, mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct perf_regs_user {
	__u64		abi;
	struct pt_regs	*regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct device			*dev;
	const struct attribute_group	**attr_groups;
	char				*name;
	int				type;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it will default to
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack)	(void);
};

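/*
 * A rough sketch of the callbacks a minimal PMU driver wires up (the
 * demo_* names are invented for illustration; a real driver also needs
 * actual counter programming and read-out logic):
 *
 *	static struct pmu demo_pmu;
 *
 *	static int demo_event_init(struct perf_event *event)
 *	{
 *		// decline events that belong to another PMU
 *		if (event->attr.type != demo_pmu.type)
 *			return -ENOENT;
 *		return 0;
 *	}
 *
 *	static int demo_add(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *		if (flags & PERF_EF_START)
 *			demo_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static struct pmu demo_pmu = {
 *		.event_init	= demo_event_init,
 *		.add		= demo_add,
 *		.del		= demo_del,
 *		.start		= demo_start,
 *		.stop		= demo_stop,
 *		.read		= demo_read,
 *	};
 *
 * The core assigns demo_pmu.type when the driver registers it, e.g. via
 * perf_pmu_register(&demo_pmu, "demo", -1).
 */
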
Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 859 | /** |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 860 | * enum perf_event_active_state - the states of a event |
Ingo Molnar | 6a93070 | 2008-12-11 15:17:03 +0100 | [diff] [blame] | 861 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 862 | enum perf_event_active_state { |
Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 863 | PERF_EVENT_STATE_ERROR = -2, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 864 | PERF_EVENT_STATE_OFF = -1, |
| 865 | PERF_EVENT_STATE_INACTIVE = 0, |
Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 866 | PERF_EVENT_STATE_ACTIVE = 1, |
Ingo Molnar | 6a93070 | 2008-12-11 15:17:03 +0100 | [diff] [blame] | 867 | }; |
| 868 | |
Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 869 | struct file; |
Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 870 | struct perf_sample_data; |
| 871 | |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 872 | typedef void (*perf_overflow_handler_t)(struct perf_event *, |
Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 873 | struct perf_sample_data *, |
| 874 | struct pt_regs *regs); |
| 875 | |
Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 876 | enum perf_group_flag { |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 877 | PERF_GROUP_SOFTWARE = 0x1, |
Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 878 | }; |
| 879 | |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 880 | #define SWEVENT_HLIST_BITS 8 |
| 881 | #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 882 | |
| 883 | struct swevent_hlist { |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 884 | struct hlist_head heads[SWEVENT_HLIST_SIZE]; |
| 885 | struct rcu_head rcu_head; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 886 | }; |
| 887 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 888 | #define PERF_ATTACH_CONTEXT 0x01 |
| 889 | #define PERF_ATTACH_GROUP 0x02 |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 890 | #define PERF_ATTACH_TASK 0x04 |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 891 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 892 | #ifdef CONFIG_CGROUP_PERF |
| 893 | /* |
| 894 | * perf_cgroup_info keeps track of time_enabled for a cgroup. |
| 895 | * This is a per-cpu dynamically allocated data structure. |
| 896 | */ |
| 897 | struct perf_cgroup_info { |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 898 | u64 time; |
| 899 | u64 timestamp; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 900 | }; |
| 901 | |
| 902 | struct perf_cgroup { |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 903 | struct cgroup_subsys_state css; |
| 904 | struct perf_cgroup_info *info; /* timing info, one per cpu */ |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 905 | }; |
| 906 | #endif |
| 907 | |
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total times in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate the total time (in nanoseconds) that child
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock; runs while the context is enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup events present */
	int				nr_branch_stack; /* branch stack events present */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*active_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
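
/*
 * Example registration from a hypothetical driver (illustrative sketch;
 * "my_pmu" and its callbacks are assumptions, not part of this header;
 * a negative type asks the core to allocate a dynamic attr.type):
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */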

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
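
/*
 * Example in-kernel counter (illustrative sketch in the style of the
 * NMI watchdog; the attr values and the "my_overflow" callback are
 * assumptions):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *	u64 count, enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */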

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	struct perf_regs_user		regs_user;
	u64				stack_user_size;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
	data->regs_user.regs = NULL;
	data->stack_user_size = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);
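
/*
 * Typical use from a PMU interrupt handler (illustrative sketch; the
 * period below is whatever the driver last programmed):
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		event->pmu->stop(event, 0);	// cf. the ->stop() contract
 */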

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event.
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
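
/*
 * Example hook point (illustrative; this is the pattern used for the
 * page-fault software events):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * With regs == NULL the caller's registers are snapshotted via
 * perf_fetch_caller_regs() above.
 */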

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
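
/*
 * Sketch of an architecture's perf_callchain_kernel() using the store
 * helper above (illustrative; valid_frame()/frame_ip()/next_frame()
 * are hypothetical stand-ins for the arch's frame walker):
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long fp = frame_pointer(regs);
 *
 *		perf_callchain_store(entry, instruction_pointer(regs));
 *		while (valid_frame(fp)) {
 *			perf_callchain_store(entry, frame_ip(fp));
 *			fp = next_frame(fp);
 *		}
 *	}
 */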

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}
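
/*
 * Example permission check, as done for cpu-wide events at open time
 * (illustrative sketch):
 *
 *	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */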

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
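
/*
 * Typical output sequence (illustrative sketch; perf_output_sample()
 * is the main in-tree user, and perf_output_put() is defined below):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, buf, len);
 *	perf_output_end(&handle);
 */
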
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)			{ }
static inline int perf_event_task_disable(void)			{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)	{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)	{ }
static inline void perf_event_task_tick(void)				{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)
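
/*
 * Example (illustrative; my_pmu_notifier is a hypothetical callback):
 *
 *	static int my_pmu_notifier(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		...
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_pmu_notifier);
 *
 * This replays UP_PREPARE/STARTING/ONLINE for the current CPU and then
 * registers the notifier for the remaining CPUs.
 */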

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
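
/*
 * Example (illustrative): expose a config bit-field layout in sysfs:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 * This defines format_attr_event, for use in the PMU's format
 * attribute group.
 */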

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */