/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};
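
/*
 * Illustrative user-space sketch (not itself part of the ABI): parsing the
 * read() output of a single, non-group event opened with
 *
 *	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *			   PERF_FORMAT_TOTAL_TIME_RUNNING;
 *
 * which, per the !PERF_FORMAT_GROUP layout above, yields three u64 values.
 * The last line scales the count for time the event was multiplexed off the
 * PMU; names such as "fd" and "scaled" exist only for the example:
 *
 *	struct {
 *		__u64	value;
 *		__u64	time_enabled;
 *		__u64	time_running;
 *	} rf;
 *	double scaled;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *		scaled = (double)rf.value * rf.time_enabled / rf.time_running;
 */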

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */

				__reserved_1   : 47;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	__u64			bp_addr;
	__u64			bp_len;
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
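
/*
 * Illustrative user-space sketch of driving this ABI (assumptions: the raw
 * syscall(2) interface is used, since libc provides no wrapper, with the
 * usual <unistd.h>/<sys/syscall.h> includes; error handling is omitted).
 * It counts user-space instructions executed by the calling thread, using
 * the attr fields, event ids and ioctls defined above:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *		.exclude_hv	= 1,
 *	};
 *	__u64 count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *
 *	... run the code to be measured ...
 *
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */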

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reason this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

		/*
		 * Hole for extension of the self monitor capabilities
		 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
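
/*
 * Illustrative sketch: the low bits of perf_event_header.misc carry the
 * cpumode of the record, so a consumer might classify a sample like this
 * ("hdr" is assumed to point at a record in the mmap data area):
 *
 *	switch (hdr->misc & PERF_RECORD_MISC_CPUMODE_MASK) {
 *	case PERF_RECORD_MISC_KERNEL:
 *		... sample ip is a kernel address ...
 *		break;
 *	case PERF_RECORD_MISC_USER:
 *		... sample ip is a user address ...
 *		break;
 *	default:
 *		break;
 *	}
 */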

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
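
/*
 * Illustrative user-space sketch of draining the mmap()ed data area
 * (assumptions: "pc" points at the perf_event_mmap_page, the data area
 * starts one page ("page_size" bytes) after it and spans "size" bytes,
 * a power of two; no record is copied out across the wrap point; and
 * rmb()/mb() stand for whatever read/full barriers the architecture
 * requires -- see the perf_event_mmap_page comment above):
 *
 *	__u64 head = pc->data_head;
 *	__u64 tail = pc->data_tail;
 *	char *data = (char *)pc + page_size;
 *
 *	rmb();
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (struct perf_event_header *)(data + (tail & (size - 1)));
 *		if (hdr->type == PERF_RECORD_SAMPLE)
 *			... decode the body according to attr.sample_type ...
 *		tail += hdr->size;
 *	}
 *
 *	mb();
 *	pc->data_tail = tail;
 */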

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
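
/*
 * Illustrative note: these values appear as sentinel entries inside the
 * ips[] array of a PERF_SAMPLE_CALLCHAIN body, each one marking the context
 * of the frames that follow it.  A mixed trace might look like:
 *
 *	PERF_CONTEXT_KERNEL, <kernel ip>, <kernel ip>,
 *	PERF_CONTEXT_USER,   <user ip>,   <user ip>, ...
 */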

#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
#endif

struct perf_guest_info_callbacks {
	int (*is_in_guest) (void);
	int (*is_user_mode) (void);
	unsigned long (*get_guest_ip) (void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

struct perf_branch_entry {
	__u64				from;
	__u64				to;
	__u64				flags;
};

struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;
		};
		struct { /* software */
			s64		remaining;
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/* breakpoint */
		struct arch_hw_breakpoint	info;
#endif
	};
	atomic64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	atomic64_t			period_left;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

#define PERF_EVENT_TXN_STARTED 1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)			(struct perf_event *event);
	void (*disable)			(struct perf_event *event);
	int (*start)			(struct perf_event *event);
	void (*stop)			(struct perf_event *event);
	void (*read)			(struct perf_event *event);
	void (*unthrottle)		(struct perf_event *event);

	/*
	 * group events scheduling is treated as a transaction,
	 * add group events as a whole and perform one schedulability test.
	 * If test fails, roll back the whole group
	 */

	void (*start_txn)	(const struct pmu *pmu);
	void (*cancel_txn)	(const struct pmu *pmu);
	int  (*commit_txn)	(const struct pmu *pmu);
};
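
/*
 * Illustrative sketch (simplified, not a quote of the scheduling code) of
 * how a caller might drive the transaction hooks when scheduling a group;
 * event_sched_in() stands in for whatever per-event path ends up invoking
 * ->enable():
 *
 *	const struct pmu *pmu = group_leader->pmu;
 *	struct perf_event *event;
 *	int err;
 *
 *	pmu->start_txn(pmu);
 *
 *	err = event_sched_in(group_leader);
 *	list_for_each_entry(event, &group_leader->sibling_list, group_entry) {
 *		if (!err)
 *			err = event_sched_in(event);
 *	}
 *
 *	if (!err && !pmu->commit_txn(pmu))
 *		return 0;		... the whole group is now on ...
 *
 *	... otherwise schedule out whatever made it on, then ...
 *	pmu->cancel_txn(pmu);
 */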

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};
 | 584 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 585 | struct file; | 
 | 586 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 587 | struct perf_mmap_data { | 
 | 588 | 	struct rcu_head			rcu_head; | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 589 | #ifdef CONFIG_PERF_USE_VMALLOC | 
 | 590 | 	struct work_struct		work; | 
| Peter Zijlstra | 3cafa9f | 2010-05-20 19:07:56 +0200 | [diff] [blame] | 591 | 	int				page_order;	/* allocation order  */ | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 592 | #endif | 
| Peter Zijlstra | 8740f94 | 2009-04-08 15:01:29 +0200 | [diff] [blame] | 593 | 	int				nr_pages;	/* nr of data pages  */ | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 594 | 	int				writable;	/* are we writable   */ | 
| Peter Zijlstra | c5078f7 | 2009-05-05 17:50:24 +0200 | [diff] [blame] | 595 | 	int				nr_locked;	/* nr pages mlocked  */ | 
| Peter Zijlstra | 8740f94 | 2009-04-08 15:01:29 +0200 | [diff] [blame] | 596 |  | 
| Peter Zijlstra | c33a0bc | 2009-05-01 12:23:16 +0200 | [diff] [blame] | 597 | 	atomic_t			poll;		/* POLL_ for wakeups */ | 
| Peter Zijlstra | 8740f94 | 2009-04-08 15:01:29 +0200 | [diff] [blame] | 598 |  | 
| Peter Zijlstra | fa58815 | 2010-05-18 10:54:20 +0200 | [diff] [blame] | 599 | 	local_t				head;		/* write position    */ | 
 | 600 | 	local_t				nest;		/* nested writers    */ | 
 | 601 | 	local_t				events;		/* event limit       */ | 
| Peter Zijlstra | adb8e11 | 2010-05-20 16:21:55 +0200 | [diff] [blame] | 602 | 	local_t				wakeup;		/* wakeup stamp      */ | 
| Peter Zijlstra | fa58815 | 2010-05-18 10:54:20 +0200 | [diff] [blame] | 603 | 	local_t				lost;		/* nr records lost   */ | 
| Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 604 |  | 
| Peter Zijlstra | 2667de8 | 2009-09-17 19:01:10 +0200 | [diff] [blame] | 605 | 	long				watermark;	/* wakeup watermark  */ | 
 | 606 |  | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 607 | 	struct perf_event_mmap_page	*user_page; | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 608 | 	void				*data_pages[0]; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 609 | }; | 
 | 610 |  | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 611 | struct perf_pending_entry { | 
 | 612 | 	struct perf_pending_entry *next; | 
 | 613 | 	void (*func)(struct perf_pending_entry *); | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 614 | }; | 
 | 615 |  | 
| Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 616 | struct perf_sample_data; | 
 | 617 |  | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 618 | typedef void (*perf_overflow_handler_t)(struct perf_event *, int, | 
 | 619 | 					struct perf_sample_data *, | 
 | 620 | 					struct pt_regs *regs); | 
 | 621 |  | 
| Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 622 | enum perf_group_flag { | 
 | 623 | 	PERF_GROUP_SOFTWARE = 0x1, | 
 | 624 | }; | 
 | 625 |  | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 626 | #define SWEVENT_HLIST_BITS	8 | 
 | 627 | #define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS) | 
 | 628 |  | 
 | 629 | struct swevent_hlist { | 
 | 630 | 	struct hlist_head	heads[SWEVENT_HLIST_SIZE]; | 
 | 631 | 	struct rcu_head		rcu_head; | 
 | 632 | }; | 
 | 633 |  | 
| Ingo Molnar | 6a93070 | 2008-12-11 15:17:03 +0100 | [diff] [blame] | 634 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 635 |  * struct perf_event - performance event kernel representation: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 636 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 637 | struct perf_event { | 
 | 638 | #ifdef CONFIG_PERF_EVENTS | 
| Ingo Molnar | 65abc86 | 2009-09-21 10:18:27 +0200 | [diff] [blame] | 639 | 	struct list_head		group_entry; | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 640 | 	struct list_head		event_entry; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 641 | 	struct list_head		sibling_list; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 642 | 	struct hlist_node		hlist_entry; | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 643 | 	int				nr_siblings; | 
| Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 644 | 	int				group_flags; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 645 | 	struct perf_event		*group_leader; | 
 | 646 | 	struct perf_event		*output; | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 647 | 	const struct pmu		*pmu; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 648 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 649 | 	enum perf_event_active_state	state; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 650 | 	atomic64_t			count; | 
| Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 651 |  | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 652 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 653 | 	 * These are the total time in nanoseconds that the event | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 654 | 	 * has been enabled (i.e. eligible to run, and the task has | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 655 | 	 * been scheduled in, if this is a per-task event) | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 656 | 	 * and running (scheduled onto the CPU), respectively. | 
 | 657 | 	 * | 
 | 658 | 	 * They are computed from tstamp_enabled, tstamp_running and | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 659 | 	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 660 | 	 */ | 
 | 661 | 	u64				total_time_enabled; | 
 | 662 | 	u64				total_time_running; | 
 | 663 |  | 
 | 664 | 	/* | 
 | 665 | 	 * These are timestamps used for computing total_time_enabled | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 666 | 	 * and total_time_running when the event is in INACTIVE or | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 667 | 	 * ACTIVE state, measured in nanoseconds from an arbitrary point | 
 | 668 | 	 * in time. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 669 | 	 * tstamp_enabled: the notional time when the event was enabled | 
 | 670 | 	 * tstamp_running: the notional time when the event was scheduled on | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 671 | 	 * tstamp_stopped: in INACTIVE state, the notional time when the | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 672 | 	 *	event was scheduled off. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 673 | 	 */ | 
 | 674 | 	u64				tstamp_enabled; | 
 | 675 | 	u64				tstamp_running; | 
 | 676 | 	u64				tstamp_stopped; | 
 | 677 |  | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 678 | 	struct perf_event_attr		attr; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 679 | 	struct hw_perf_event		hw; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 680 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 681 | 	struct perf_event_context	*ctx; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 682 | 	struct file			*filp; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 683 |  | 
 | 684 | 	/* | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 685 | 	 * These accumulate total time (in nanoseconds) that children | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 686 | 	 * events have been enabled and running, respectively. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 687 | 	 */ | 
 | 688 | 	atomic64_t			child_total_time_enabled; | 
 | 689 | 	atomic64_t			child_total_time_running; | 
 | 690 |  | 
 | 691 | 	/* | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 692 | 	 * Protect attach/detach and child_list: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 693 | 	 */ | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 694 | 	struct mutex			child_mutex; | 
 | 695 | 	struct list_head		child_list; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 696 | 	struct perf_event		*parent; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 697 |  | 
 | 698 | 	int				oncpu; | 
 | 699 | 	int				cpu; | 
 | 700 |  | 
| Peter Zijlstra | 082ff5a | 2009-05-23 18:29:00 +0200 | [diff] [blame] | 701 | 	struct list_head		owner_entry; | 
 | 702 | 	struct task_struct		*owner; | 
 | 703 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 704 | 	/* mmap bits */ | 
 | 705 | 	struct mutex			mmap_mutex; | 
 | 706 | 	atomic_t			mmap_count; | 
 | 707 | 	struct perf_mmap_data		*data; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 708 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 709 | 	/* poll related */ | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 710 | 	wait_queue_head_t		waitq; | 
| Peter Zijlstra | 3c446b3 | 2009-04-06 11:45:01 +0200 | [diff] [blame] | 711 | 	struct fasync_struct		*fasync; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 712 |  | 
 | 713 | 	/* delayed work for NMIs and such */ | 
 | 714 | 	int				pending_wakeup; | 
| Peter Zijlstra | 4c9e254 | 2009-04-06 11:45:09 +0200 | [diff] [blame] | 715 | 	int				pending_kill; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 716 | 	int				pending_disable; | 
| Peter Zijlstra | 671dec5 | 2009-04-06 11:45:02 +0200 | [diff] [blame] | 717 | 	struct perf_pending_entry	pending; | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 718 |  | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 719 | 	atomic_t			event_limit; | 
 | 720 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 721 | 	void (*destroy)(struct perf_event *); | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 722 | 	struct rcu_head			rcu_head; | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 723 |  | 
 | 724 | 	struct pid_namespace		*ns; | 
| Peter Zijlstra | 8e5799b | 2009-06-02 15:08:15 +0200 | [diff] [blame] | 725 | 	u64				id; | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 726 |  | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 727 | 	perf_overflow_handler_t		overflow_handler; | 
| Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 728 |  | 
| Li Zefan | 07b139c | 2009-12-21 14:27:35 +0800 | [diff] [blame] | 729 | #ifdef CONFIG_EVENT_TRACING | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 730 | 	struct ftrace_event_call	*tp_event; | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 731 | 	struct event_filter		*filter; | 
| Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 732 | #endif | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 733 |  | 
 | 734 | #endif /* CONFIG_PERF_EVENTS */ | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 735 | }; | 
 | 736 |  | 
 | 737 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 738 |  * struct perf_event_context - event context structure | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 739 |  * | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 740 |  * Used as a container for task events and CPU events as well: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 741 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 742 | struct perf_event_context { | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 743 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 744 | 	 * Protect the states of the events in the list, | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 745 | 	 * nr_active, and the list: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 746 | 	 */ | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 747 | 	raw_spinlock_t			lock; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 748 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 749 | 	 * Protect the list of events.  Holding either the mutex or the lock | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 750 | 	 * is sufficient to ensure the list doesn't change; to change the list | 
 | 751 | 	 * you need to hold both (see the sketch after this structure). | 
 | 752 | 	 */ | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 753 | 	struct mutex			mutex; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 754 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 755 | 	struct list_head		pinned_groups; | 
 | 756 | 	struct list_head		flexible_groups; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 757 | 	struct list_head		event_list; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 758 | 	int				nr_events; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 759 | 	int				nr_active; | 
 | 760 | 	int				is_active; | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 761 | 	int				nr_stat; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 762 | 	atomic_t			refcount; | 
 | 763 | 	struct task_struct		*task; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 764 |  | 
 | 765 | 	/* | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 766 | 	 * Context clock; runs only while the context is enabled. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 767 | 	 */ | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 768 | 	u64				time; | 
 | 769 | 	u64				timestamp; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 770 |  | 
 | 771 | 	/* | 
 | 772 | 	 * These fields let us detect when two contexts have both | 
 | 773 | 	 * been cloned (inherited) from a common ancestor. | 
 | 774 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 775 | 	struct perf_event_context	*parent_ctx; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 776 | 	u64				parent_gen; | 
 | 777 | 	u64				generation; | 
 | 778 | 	int				pin_count; | 
 | 779 | 	struct rcu_head			rcu_head; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 780 | }; | 
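/*
 * A minimal sketch of the locking rule documented above -- an illustration,
 * not code from this file: to modify the event list you must hold both
 * ctx->mutex and ctx->lock; holding either one is enough for readers.
 * example_ctx_add() and the bare list entry are hypothetical.
 */
static inline void example_ctx_add(struct perf_event_context *ctx,
				   struct list_head *entry)
{
	mutex_lock(&ctx->mutex);		/* sleepable side, taken first */
	raw_spin_lock_irq(&ctx->lock);		/* covers readers in irq context */

	list_add_tail(entry, &ctx->event_list);
	ctx->nr_events++;

	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);
}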
 | 781 |  | 
 | 782 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 783 |  * struct perf_cpu_context - per cpu event context structure | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 784 |  */ | 
 | 785 | struct perf_cpu_context { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 786 | 	struct perf_event_context	ctx; | 
 | 787 | 	struct perf_event_context	*task_ctx; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 788 | 	int				active_oncpu; | 
 | 789 | 	int				max_pertask; | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 790 | 	int				exclusive; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 791 | 	struct swevent_hlist		*swevent_hlist; | 
 | 792 | 	struct mutex			hlist_mutex; | 
 | 793 | 	int				hlist_refcount; | 
| Peter Zijlstra | 96f6d44 | 2009-03-23 18:22:07 +0100 | [diff] [blame] | 794 |  | 
 | 795 | 	/* | 
 | 796 | 	 * Recursion avoidance: | 
 | 797 | 	 * | 
 | 798 | 	 * one counter per context: task, softirq, irq, nmi | 
 | 799 | 	 */ | 
| Ingo Molnar | 22a4f65 | 2009-06-01 10:13:37 +0200 | [diff] [blame] | 800 | 	int				recursion[4]; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 801 | }; | 
 | 802 |  | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 803 | struct perf_output_handle { | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 804 | 	struct perf_event		*event; | 
 | 805 | 	struct perf_mmap_data		*data; | 
| Peter Zijlstra | 6d1acfd | 2010-05-18 11:12:48 +0200 | [diff] [blame] | 806 | 	unsigned long			wakeup; | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 807 | 	unsigned long			size; | 
 | 808 | 	void				*addr; | 
 | 809 | 	int				page; | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 810 | 	int				nmi; | 
 | 811 | 	int				sample; | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 812 | }; | 
 | 813 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 814 | #ifdef CONFIG_PERF_EVENTS | 
| Robert Richter | 829b42d | 2009-04-29 12:46:59 +0200 | [diff] [blame] | 815 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 816 | /* | 
 | 817 |  * Set by architecture code: | 
 | 818 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 819 | extern int perf_max_events; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 820 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 821 | extern const struct pmu *hw_perf_event_init(struct perf_event *event); | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 822 |  | 
| Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 823 | extern void perf_event_task_sched_in(struct task_struct *task); | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 824 | extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); | 
| Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 825 | extern void perf_event_task_tick(struct task_struct *task); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 826 | extern int perf_event_init_task(struct task_struct *child); | 
 | 827 | extern void perf_event_exit_task(struct task_struct *child); | 
 | 828 | extern void perf_event_free_task(struct task_struct *task); | 
 | 829 | extern void set_perf_event_pending(void); | 
 | 830 | extern void perf_event_do_pending(void); | 
 | 831 | extern void perf_event_print_debug(void); | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 832 | extern void __perf_disable(void); | 
 | 833 | extern bool __perf_enable(void); | 
 | 834 | extern void perf_disable(void); | 
 | 835 | extern void perf_enable(void); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 836 | extern int perf_event_task_disable(void); | 
 | 837 | extern int perf_event_task_enable(void); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 838 | extern void perf_event_update_userpage(struct perf_event *event); | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 839 | extern int perf_event_release_kernel(struct perf_event *event); | 
 | 840 | extern struct perf_event * | 
 | 841 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | 
 | 842 | 				int cpu, | 
| Frederic Weisbecker | 97eaf53 | 2009-10-18 15:33:50 +0200 | [diff] [blame] | 843 | 				pid_t pid, | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 844 | 				perf_overflow_handler_t callback); | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 845 | extern u64 perf_event_read_value(struct perf_event *event, | 
 | 846 | 				 u64 *enabled, u64 *running); | 
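/*
 * Sketch of the in-kernel counter API above (an assumption, not code from
 * this file): create a cycles counter bound to one CPU, read it, release it.
 * The NULL callback means pure counting; error handling is simplified.
 */
static inline int example_kernel_cycles_counter(int cpu)
{
	struct perf_event *event;
	u64 enabled, running, count;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};

	/* cpu >= 0 and pid == -1: count on that CPU, across all tasks */
	event = perf_event_create_kernel_counter(&attr, cpu, -1, NULL);
	if (IS_ERR(event))
		return PTR_ERR(event);

	/* ... let it count for a while ... */

	count = perf_event_read_value(event, &enabled, &running);
	(void)count;

	return perf_event_release_kernel(event);
}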
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 847 |  | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 848 | struct perf_sample_data { | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 849 | 	u64				type; | 
 | 850 |  | 
 | 851 | 	u64				ip; | 
 | 852 | 	struct { | 
 | 853 | 		u32	pid; | 
 | 854 | 		u32	tid; | 
 | 855 | 	}				tid_entry; | 
 | 856 | 	u64				time; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 857 | 	u64				addr; | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 858 | 	u64				id; | 
 | 859 | 	u64				stream_id; | 
 | 860 | 	struct { | 
 | 861 | 		u32	cpu; | 
 | 862 | 		u32	reserved; | 
 | 863 | 	}				cpu_entry; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 864 | 	u64				period; | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 865 | 	struct perf_callchain_entry	*callchain; | 
| Frederic Weisbecker | 3a43ce6 | 2009-08-08 04:26:37 +0200 | [diff] [blame] | 866 | 	struct perf_raw_record		*raw; | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 867 | }; | 
 | 868 |  | 
| Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 869 | static inline | 
 | 870 | void perf_sample_data_init(struct perf_sample_data *data, u64 addr) | 
 | 871 | { | 
 | 872 | 	data->addr = addr; | 
 | 873 | 	data->raw  = NULL; | 
 | 874 | } | 
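/*
 * Usage sketch (an assumption, not code from this file): a PMU overflow
 * path fills on-stack sample data before calling the generic overflow
 * handler.  "period" stands in for whatever the hardware layer last
 * programmed; the empty branch is where arch code would stop the event.
 */
static inline void example_handle_overflow(struct perf_event *event, int nmi,
					   struct pt_regs *regs,
					   u64 addr, u64 period)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, addr);	/* sets ->addr, clears ->raw */
	data.period = period;

	if (perf_event_overflow(event, nmi, &data, regs)) {
		/* a nonzero return asks the caller to stop the event */
	}
}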
 | 875 |  | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 876 | extern void perf_output_sample(struct perf_output_handle *handle, | 
 | 877 | 			       struct perf_event_header *header, | 
 | 878 | 			       struct perf_sample_data *data, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 879 | 			       struct perf_event *event); | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 880 | extern void perf_prepare_sample(struct perf_event_header *header, | 
 | 881 | 				struct perf_sample_data *data, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 882 | 				struct perf_event *event, | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 883 | 				struct pt_regs *regs); | 
 | 884 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 885 | extern int perf_event_overflow(struct perf_event *event, int nmi, | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 886 | 				 struct perf_sample_data *data, | 
 | 887 | 				 struct pt_regs *regs); | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 888 |  | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 889 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 890 |  * Return 1 for a software event, 0 for a hardware event | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 891 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 892 | static inline int is_software_event(struct perf_event *event) | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 893 | { | 
| Peter Zijlstra | 92b6759 | 2010-01-18 14:02:16 +0100 | [diff] [blame] | 894 | 	switch (event->attr.type) { | 
 | 895 | 	case PERF_TYPE_SOFTWARE: | 
 | 896 | 	case PERF_TYPE_TRACEPOINT: | 
 | 897 | 	/* for now, breakpoint events are also handled as software events */ | 
 | 898 | 	case PERF_TYPE_BREAKPOINT: | 
 | 899 | 		return 1; | 
 | 900 | 	} | 
 | 901 | 	return 0; | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 902 | } | 
 | 903 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 904 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 905 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 906 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 907 |  | 
| Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 908 | extern void | 
 | 909 | perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); | 
 | 910 |  | 
 | 911 | /* | 
 | 912 |  * Take a snapshot of the regs. Skip ip and frame pointer to | 
 | 913 |  * the nth caller. We only need a few of the regs: | 
 | 914 |  * - ip for PERF_SAMPLE_IP | 
 | 915 |  * - cs for user_mode() tests | 
 | 916 |  * - bp for callchains | 
 | 917 |  * - eflags, for future purposes, just in case | 
 | 918 |  */ | 
 | 919 | static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) | 
 | 920 | { | 
 | 921 | 	unsigned long ip; | 
 | 922 |  | 
 | 923 | 	memset(regs, 0, sizeof(*regs)); | 
 | 924 |  | 
 | 925 | 	switch (skip) { | 
 | 926 | 	case 1: | 
 | 927 | 		ip = CALLER_ADDR0; | 
 | 928 | 		break; | 
 | 929 | 	case 2: | 
 | 930 | 		ip = CALLER_ADDR1; | 
 | 931 | 		break; | 
 | 932 | 	case 3: | 
 | 933 | 		ip = CALLER_ADDR2; | 
 | 934 | 		break; | 
 | 935 | 	case 4: | 
 | 936 | 		ip = CALLER_ADDR3; | 
 | 937 | 		break; | 
 | 938 | 	/* Deeper skip levels are not needed for now */ | 
 | 939 | 	default: | 
 | 940 | 		ip = 0; | 
 | 941 | 	} | 
 | 942 |  | 
 | 943 | 	perf_arch_fetch_caller_regs(regs, ip, skip); | 
 | 944 | } | 
 | 945 |  | 
| Frederic Weisbecker | e49a5bd | 2010-03-22 19:40:03 +0100 | [diff] [blame] | 946 | static inline void | 
 | 947 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | 
 | 948 | { | 
 | 949 | 	if (atomic_read(&perf_swevent_enabled[event_id])) { | 
 | 950 | 		struct pt_regs hot_regs; | 
 | 951 |  | 
 | 952 | 		if (!regs) { | 
 | 953 | 			perf_fetch_caller_regs(&hot_regs, 1); | 
 | 954 | 			regs = &hot_regs; | 
 | 955 | 		} | 
 | 956 | 		__perf_sw_event(event_id, nr, nmi, regs, addr); | 
 | 957 | 	} | 
 | 958 | } | 
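/*
 * Typical caller sketch (an assumption): the fault path counts a software
 * event; passing the current regs lets the sample point at the faulting
 * instruction, while a NULL regs would make perf_sw_event() snapshot the
 * caller's regs via perf_fetch_caller_regs().
 */
static inline void example_count_page_fault(struct pt_regs *regs,
					    unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}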
 | 959 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 960 | extern void __perf_event_mmap(struct vm_area_struct *vma); | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 961 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 962 | static inline void perf_event_mmap(struct vm_area_struct *vma) | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 963 | { | 
 | 964 | 	if (vma->vm_flags & VM_EXEC) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 965 | 		__perf_event_mmap(vma); | 
| Peter Zijlstra | 089dd79 | 2009-06-05 14:04:55 +0200 | [diff] [blame] | 966 | } | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 967 |  | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 968 | extern struct perf_guest_info_callbacks *perf_guest_cbs; | 
| Zhang, Yanmin | dcf46b9 | 2010-04-20 10:13:58 +0800 | [diff] [blame] | 969 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 
 | 970 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 971 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 972 | extern void perf_event_comm(struct task_struct *tsk); | 
 | 973 | extern void perf_event_fork(struct task_struct *tsk); | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 974 |  | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 975 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | 
 | 976 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 977 | extern int sysctl_perf_event_paranoid; | 
 | 978 | extern int sysctl_perf_event_mlock; | 
 | 979 | extern int sysctl_perf_event_sample_rate; | 
| Peter Zijlstra | 1ccd154 | 2009-04-09 10:53:45 +0200 | [diff] [blame] | 980 |  | 
| Peter Zijlstra | 320ebf0 | 2010-03-02 12:35:37 +0100 | [diff] [blame] | 981 | static inline bool perf_paranoid_tracepoint_raw(void) | 
 | 982 | { | 
 | 983 | 	return sysctl_perf_event_paranoid > -1; | 
 | 984 | } | 
 | 985 |  | 
 | 986 | static inline bool perf_paranoid_cpu(void) | 
 | 987 | { | 
 | 988 | 	return sysctl_perf_event_paranoid > 0; | 
 | 989 | } | 
 | 990 |  | 
 | 991 | static inline bool perf_paranoid_kernel(void) | 
 | 992 | { | 
 | 993 | 	return sysctl_perf_event_paranoid > 1; | 
 | 994 | } | 
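/*
 * Sketch (an assumption, not code from this file): the kind of permission
 * check the syscall layer builds on the helpers above; CAP_SYS_ADMIN
 * overrides the paranoia setting for cpu-wide events.
 */
static inline int example_cpu_event_allowed(void)
{
	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;
	return 0;
}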
 | 995 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 996 | extern void perf_event_init(void); | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 997 | extern void perf_tp_event(u64 addr, u64 count, void *record, | 
 | 998 | 			  int entry_size, struct pt_regs *regs, | 
 | 999 | 			  struct hlist_head *head); | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 1000 | extern void perf_bp_event(struct perf_event *event, void *data); | 
| Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 1001 |  | 
| Paul Mackerras | 9d23a90 | 2009-05-14 21:48:08 +1000 | [diff] [blame] | 1002 | #ifndef perf_misc_flags | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1003 | #define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \ | 
 | 1004 | 				 PERF_RECORD_MISC_KERNEL) | 
| Paul Mackerras | 9d23a90 | 2009-05-14 21:48:08 +1000 | [diff] [blame] | 1005 | #define perf_instruction_pointer(regs)	instruction_pointer(regs) | 
 | 1006 | #endif | 
 | 1007 |  | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1008 | extern int perf_output_begin(struct perf_output_handle *handle, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1009 | 			     struct perf_event *event, unsigned int size, | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1010 | 			     int nmi, int sample); | 
 | 1011 | extern void perf_output_end(struct perf_output_handle *handle); | 
 | 1012 | extern void perf_output_copy(struct perf_output_handle *handle, | 
 | 1013 | 			     const void *buf, unsigned int len); | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 1014 | extern int perf_swevent_get_recursion_context(void); | 
 | 1015 | extern void perf_swevent_put_recursion_context(int rctx); | 
| Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1016 | extern void perf_event_enable(struct perf_event *event); | 
 | 1017 | extern void perf_event_disable(struct perf_event *event); | 
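/*
 * Recursion-guard sketch (an assumption, not code from this file): event
 * generation brackets its work with get/put so that an event raised from
 * inside a perf handler in the same context (task, softirq, irq, nmi)
 * is dropped instead of recursing.
 */
static inline void example_guarded_work(void)
{
	int rctx = perf_swevent_get_recursion_context();

	if (rctx < 0)
		return;		/* already inside a perf handler: drop */

	/* ... generate or process events here (hypothetical work) ... */

	perf_swevent_put_recursion_context(rctx);
}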
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1018 | #else | 
 | 1019 | static inline void | 
| Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 1020 | perf_event_task_sched_in(struct task_struct *task)			{ } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1021 | static inline void | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1022 | perf_event_task_sched_out(struct task_struct *task, | 
| Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 1023 | 			    struct task_struct *next)			{ } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1024 | static inline void | 
| Peter Zijlstra | 49f4743 | 2009-12-27 11:51:52 +0100 | [diff] [blame] | 1025 | perf_event_task_tick(struct task_struct *task)				{ } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1026 | static inline int perf_event_init_task(struct task_struct *child)	{ return 0; } | 
 | 1027 | static inline void perf_event_exit_task(struct task_struct *child)	{ } | 
 | 1028 | static inline void perf_event_free_task(struct task_struct *task)	{ } | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1029 | static inline void perf_event_do_pending(void)				{ } | 
 | 1030 | static inline void perf_event_print_debug(void)				{ } | 
| Peter Zijlstra | 9e35ad3 | 2009-05-13 16:21:38 +0200 | [diff] [blame] | 1031 | static inline void perf_disable(void)					{ } | 
 | 1032 | static inline void perf_enable(void)					{ } | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1033 | static inline int perf_event_task_disable(void)				{ return -EINVAL; } | 
 | 1034 | static inline int perf_event_task_enable(void)				{ return -EINVAL; } | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 1035 |  | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 1036 | static inline void | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1037 | perf_sw_event(u32 event_id, u64 nr, int nmi, | 
| Peter Zijlstra | 78f13e9 | 2009-04-08 15:01:33 +0200 | [diff] [blame] | 1038 | 		     struct pt_regs *regs, u64 addr)			{ } | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 1039 | static inline void | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 1040 | perf_bp_event(struct perf_event *event, void *data)			{ } | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 1041 |  | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1042 | static inline int perf_register_guest_info_callbacks | 
| Zhang, Yanmin | dcf46b9 | 2010-04-20 10:13:58 +0800 | [diff] [blame] | 1043 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1044 | static inline int perf_unregister_guest_info_callbacks | 
| Zhang, Yanmin | dcf46b9 | 2010-04-20 10:13:58 +0800 | [diff] [blame] | 1045 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1046 |  | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1047 | static inline void perf_event_mmap(struct vm_area_struct *vma)		{ } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1048 | static inline void perf_event_comm(struct task_struct *tsk)		{ } | 
 | 1049 | static inline void perf_event_fork(struct task_struct *tsk)		{ } | 
 | 1050 | static inline void perf_event_init(void)				{ } | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 1051 | static inline int  perf_swevent_get_recursion_context(void)		{ return -1; } | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 1052 | static inline void perf_swevent_put_recursion_context(int rctx)		{ } | 
| Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1053 | static inline void perf_event_enable(struct perf_event *event)		{ } | 
 | 1054 | static inline void perf_event_disable(struct perf_event *event)		{ } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1055 | #endif | 
 | 1056 |  | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1057 | #define perf_output_put(handle, x) \ | 
 | 1058 | 	perf_output_copy((handle), &(x), sizeof(x)) | 
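/*
 * Output-path sketch (an assumption, mirroring the prepare/begin/copy/end
 * contract above; requires CONFIG_PERF_EVENTS): size the record, reserve
 * ring-buffer space, emit the sample, then commit.
 */
static inline void example_output_record(struct perf_event *event, int nmi,
					 struct perf_sample_data *data,
					 struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* size up the record first; header.size feeds the reservation */
	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size, nmi, 1))
		return;				/* no buffer, or not enough space */

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);
}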
 | 1059 |  | 
| Peter Zijlstra | 3f6da39 | 2010-03-05 13:01:18 +0100 | [diff] [blame] | 1060 | /* | 
 | 1061 |  * This has to have a higher priority than migration_notifier in sched.c. | 
 | 1062 |  */ | 
 | 1063 | #define perf_cpu_notifier(fn)					\ | 
 | 1064 | do {								\ | 
 | 1065 | 	static struct notifier_block fn##_nb __cpuinitdata =	\ | 
 | 1066 | 		{ .notifier_call = fn, .priority = 20 };	\ | 
 | 1067 | 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\ | 
 | 1068 | 		(void *)(unsigned long)smp_processor_id());	\ | 
 | 1069 | 	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\ | 
 | 1070 | 		(void *)(unsigned long)smp_processor_id());	\ | 
 | 1071 | 	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\ | 
 | 1072 | 		(void *)(unsigned long)smp_processor_id());	\ | 
 | 1073 | 	register_cpu_notifier(&fn##_nb);			\ | 
 | 1074 | } while (0) | 
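/*
 * Usage sketch (an assumption): a hotplug callback registered through
 * perf_cpu_notifier().  The macro first replays CPU_UP_PREPARE,
 * CPU_STARTING and CPU_ONLINE for the current CPU, then registers the
 * notifier block; example_perf_cpu_notify() is hypothetical.
 */
static int __cpuinit example_perf_cpu_notify(struct notifier_block *self,
					     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_debug("perf example: preparing cpu %u\n", cpu);
		break;
	case CPU_ONLINE:
		pr_debug("perf example: cpu %u online\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

/* in some init path:  perf_cpu_notifier(example_perf_cpu_notify); */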
 | 1075 |  | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 1076 | #endif /* __KERNEL__ */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1077 | #endif /* _LINUX_PERF_EVENT_H */ |