| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1 | /* | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 2 |  * Performance events: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 3 |  * | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 4 |  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 5 |  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar | 
 | 6 |  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 7 |  * | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 8 |  * Data type definitions, declarations, prototypes. | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 9 |  * | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 10 |  *    Started by: Thomas Gleixner and Ingo Molnar | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 11 |  * | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 12 |  * For licencing details see kernel-base/COPYING | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 13 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 14 | #ifndef _LINUX_PERF_EVENT_H | 
 | 15 | #define _LINUX_PERF_EVENT_H | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 16 |  | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 17 | #include <linux/types.h> | 
 | 18 | #include <linux/ioctl.h> | 
| Paul Mackerras | 9aaa131 | 2009-03-21 15:31:47 +1100 | [diff] [blame] | 19 | #include <asm/byteorder.h> | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 20 |  | 
 | 21 | /* | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 22 |  * User-space ABI bits: | 
 | 23 |  */ | 
 | 24 |  | 
 | 25 | /* | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 26 |  * attr.type | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 27 |  */ | 
| Peter Zijlstra | 1c432d8 | 2009-06-11 13:19:29 +0200 | [diff] [blame] | 28 | enum perf_type_id { | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 29 | 	PERF_TYPE_HARDWARE			= 0, | 
 | 30 | 	PERF_TYPE_SOFTWARE			= 1, | 
 | 31 | 	PERF_TYPE_TRACEPOINT			= 2, | 
 | 32 | 	PERF_TYPE_HW_CACHE			= 3, | 
 | 33 | 	PERF_TYPE_RAW				= 4, | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 34 | 	PERF_TYPE_BREAKPOINT			= 5, | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 35 |  | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 36 | 	PERF_TYPE_MAX,				/* non-ABI */ | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 37 | }; | 
 | 38 |  | 
 | 39 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 40 |  * Generalized performance event event_id types, used by the | 
 | 41 |  * attr.event_id parameter of the sys_perf_event_open() | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 42 |  * syscall: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 43 |  */ | 
| Peter Zijlstra | 1c432d8 | 2009-06-11 13:19:29 +0200 | [diff] [blame] | 44 | enum perf_hw_id { | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 45 | 	/* | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 46 | 	 * Common hardware events, generalized by the kernel: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 47 | 	 */ | 
| Peter Zijlstra | f4dbfa8 | 2009-06-11 14:06:28 +0200 | [diff] [blame] | 48 | 	PERF_COUNT_HW_CPU_CYCLES		= 0, | 
 | 49 | 	PERF_COUNT_HW_INSTRUCTIONS		= 1, | 
 | 50 | 	PERF_COUNT_HW_CACHE_REFERENCES		= 2, | 
 | 51 | 	PERF_COUNT_HW_CACHE_MISSES		= 3, | 
 | 52 | 	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4, | 
 | 53 | 	PERF_COUNT_HW_BRANCH_MISSES		= 5, | 
 | 54 | 	PERF_COUNT_HW_BUS_CYCLES		= 6, | 
| Ingo Molnar | 8f62242 | 2011-04-29 13:19:47 +0200 | [diff] [blame] | 55 | 	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7, | 
 | 56 | 	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8, | 
| Stephane Eranian | c37e174 | 2011-12-11 00:28:52 +0100 | [diff] [blame] | 57 | 	PERF_COUNT_HW_REF_CPU_CYCLES		= 9, | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 58 |  | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 59 | 	PERF_COUNT_HW_MAX,			/* non-ABI */ | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 60 | }; | 
| Ingo Molnar | 6c594c2 | 2008-12-14 12:34:15 +0100 | [diff] [blame] | 61 |  | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 62 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 63 |  * Generalized hardware cache events: | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 64 |  * | 
| Peter Zijlstra | 89d6c0b | 2011-04-22 23:37:06 +0200 | [diff] [blame] | 65 |  *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 66 |  *       { read, write, prefetch } x | 
 | 67 |  *       { accesses, misses } | 
 | 68 |  */ | 
| Peter Zijlstra | 1c432d8 | 2009-06-11 13:19:29 +0200 | [diff] [blame] | 69 | enum perf_hw_cache_id { | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 70 | 	PERF_COUNT_HW_CACHE_L1D			= 0, | 
 | 71 | 	PERF_COUNT_HW_CACHE_L1I			= 1, | 
 | 72 | 	PERF_COUNT_HW_CACHE_LL			= 2, | 
 | 73 | 	PERF_COUNT_HW_CACHE_DTLB		= 3, | 
 | 74 | 	PERF_COUNT_HW_CACHE_ITLB		= 4, | 
 | 75 | 	PERF_COUNT_HW_CACHE_BPU			= 5, | 
| Peter Zijlstra | 89d6c0b | 2011-04-22 23:37:06 +0200 | [diff] [blame] | 76 | 	PERF_COUNT_HW_CACHE_NODE		= 6, | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 77 |  | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 78 | 	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */ | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 79 | }; | 
 | 80 |  | 
| Peter Zijlstra | 1c432d8 | 2009-06-11 13:19:29 +0200 | [diff] [blame] | 81 | enum perf_hw_cache_op_id { | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 82 | 	PERF_COUNT_HW_CACHE_OP_READ		= 0, | 
 | 83 | 	PERF_COUNT_HW_CACHE_OP_WRITE		= 1, | 
 | 84 | 	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2, | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 85 |  | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 86 | 	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */ | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 87 | }; | 
 | 88 |  | 
| Peter Zijlstra | 1c432d8 | 2009-06-11 13:19:29 +0200 | [diff] [blame] | 89 | enum perf_hw_cache_op_result_id { | 
 | 90 | 	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0, | 
 | 91 | 	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1, | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 92 |  | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 93 | 	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */ | 
| Ingo Molnar | 8326f44 | 2009-06-05 20:22:46 +0200 | [diff] [blame] | 94 | }; | 
 | 95 |  | 
 | 96 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 97 |  * Special "software" events provided by the kernel, even if the hardware | 
 | 98 |  * does not support performance events. These events measure various | 
| Peter Zijlstra | b8e8351 | 2009-03-19 20:26:18 +0100 | [diff] [blame] | 99 |  * physical and sw events of the kernel (and allow the profiling of them as | 
 | 100 |  * well): | 
 | 101 |  */ | 
| Peter Zijlstra | 1c432d8 | 2009-06-11 13:19:29 +0200 | [diff] [blame] | 102 | enum perf_sw_ids { | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 103 | 	PERF_COUNT_SW_CPU_CLOCK			= 0, | 
 | 104 | 	PERF_COUNT_SW_TASK_CLOCK		= 1, | 
 | 105 | 	PERF_COUNT_SW_PAGE_FAULTS		= 2, | 
 | 106 | 	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3, | 
 | 107 | 	PERF_COUNT_SW_CPU_MIGRATIONS		= 4, | 
 | 108 | 	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5, | 
 | 109 | 	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6, | 
| Anton Blanchard | f7d7986 | 2009-10-18 01:09:29 +0000 | [diff] [blame] | 110 | 	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7, | 
 | 111 | 	PERF_COUNT_SW_EMULATION_FAULTS		= 8, | 
| Ingo Molnar | 6c594c2 | 2008-12-14 12:34:15 +0100 | [diff] [blame] | 112 |  | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 113 | 	PERF_COUNT_SW_MAX,			/* non-ABI */ | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 114 | }; | 
 | 115 |  | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 116 | /* | 
| Peter Zijlstra | 0d48696 | 2009-06-02 19:22:16 +0200 | [diff] [blame] | 117 |  * Bits that can be set in attr.sample_type to request information | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 118 |  * in the overflow packets. | 
 | 119 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 120 | enum perf_event_sample_format { | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 121 | 	PERF_SAMPLE_IP				= 1U << 0, | 
 | 122 | 	PERF_SAMPLE_TID				= 1U << 1, | 
 | 123 | 	PERF_SAMPLE_TIME			= 1U << 2, | 
 | 124 | 	PERF_SAMPLE_ADDR			= 1U << 3, | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 125 | 	PERF_SAMPLE_READ			= 1U << 4, | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 126 | 	PERF_SAMPLE_CALLCHAIN			= 1U << 5, | 
 | 127 | 	PERF_SAMPLE_ID				= 1U << 6, | 
 | 128 | 	PERF_SAMPLE_CPU				= 1U << 7, | 
 | 129 | 	PERF_SAMPLE_PERIOD			= 1U << 8, | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 130 | 	PERF_SAMPLE_STREAM_ID			= 1U << 9, | 
| Frederic Weisbecker | 3a43ce6 | 2009-08-08 04:26:37 +0200 | [diff] [blame] | 131 | 	PERF_SAMPLE_RAW				= 1U << 10, | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 132 | 	PERF_SAMPLE_BRANCH_STACK		= 1U << 11, | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 133 |  | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 134 | 	PERF_SAMPLE_MAX = 1U << 12,		/* non-ABI */ | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 135 | }; | 
 | 136 |  | 
 | 137 | /* | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 138 |  * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set | 
 | 139 |  * | 
 | 140 |  * If the user does not pass priv level information via branch_sample_type, | 
 | 141 |  * the kernel uses the event's priv level. Branch and event priv levels do | 
 | 142 |  * not have to match. Branch priv level is checked for permissions. | 
 | 143 |  * | 
 | 144 |  * The branch types can be combined, however BRANCH_ANY covers all types | 
 | 145 |  * of branches and therefore it supersedes all the other types. | 
 | 146 |  */ | 
 | 147 | enum perf_branch_sample_type { | 
 | 148 | 	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */ | 
 | 149 | 	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */ | 
 | 150 | 	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */ | 
 | 151 |  | 
 | 152 | 	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */ | 
 | 153 | 	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */ | 
 | 154 | 	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */ | 
 | 155 | 	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */ | 
 | 156 |  | 
 | 157 | 	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */ | 
 | 158 | }; | 
 | 159 |  | 
 | 160 | #define PERF_SAMPLE_BRANCH_PLM_ALL \ | 
 | 161 | 	(PERF_SAMPLE_BRANCH_USER|\ | 
 | 162 | 	 PERF_SAMPLE_BRANCH_KERNEL|\ | 
 | 163 | 	 PERF_SAMPLE_BRANCH_HV) | 
 | 164 |  | 
 | 165 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 166 |  * The format of the data returned by read() on a perf event fd, | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 167 |  * as specified by attr.read_format: | 
 | 168 |  * | 
 | 169 |  * struct read_format { | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 170 |  *	{ u64		value; | 
| Vince Weaver | d7ebe75 | 2011-06-03 17:59:51 -0400 | [diff] [blame] | 171 |  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED | 
 | 172 |  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 173 |  *	  { u64		id;           } && PERF_FORMAT_ID | 
 | 174 |  *	} && !PERF_FORMAT_GROUP | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 175 |  * | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 176 |  *	{ u64		nr; | 
| Vince Weaver | d7ebe75 | 2011-06-03 17:59:51 -0400 | [diff] [blame] | 177 |  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED | 
 | 178 |  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 179 |  *	  { u64		value; | 
 | 180 |  *	    { u64	id;           } && PERF_FORMAT_ID | 
 | 181 |  *	  }		cntr[nr]; | 
 | 182 |  *	} && PERF_FORMAT_GROUP | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 183 |  * }; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 184 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 185 | enum perf_event_read_format { | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 186 | 	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0, | 
 | 187 | 	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1, | 
 | 188 | 	PERF_FORMAT_ID				= 1U << 2, | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 189 | 	PERF_FORMAT_GROUP			= 1U << 3, | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 190 |  | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 191 | 	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */ | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 192 | }; | 
 | 193 |  | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 194 | #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */ | 
| Stephane Eranian | cb5d769 | 2012-02-09 23:21:05 +0100 | [diff] [blame] | 195 | #define PERF_ATTR_SIZE_VER1	72	/* add: config2 */ | 
 | 196 | #define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */ | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 197 |  | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 198 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 199 |  * Hardware event_id to monitor via a performance monitoring event: | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 200 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 201 | struct perf_event_attr { | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 202 |  | 
| Peter Zijlstra | f4a2deb | 2009-03-23 18:22:06 +0100 | [diff] [blame] | 203 | 	/* | 
| Ingo Molnar | a21ca2c | 2009-06-06 09:58:57 +0200 | [diff] [blame] | 204 | 	 * Major type: hardware/software/tracepoint/etc. | 
 | 205 | 	 */ | 
 | 206 | 	__u32			type; | 
| Peter Zijlstra | 974802e | 2009-06-12 12:46:55 +0200 | [diff] [blame] | 207 |  | 
 | 208 | 	/* | 
 | 209 | 	 * Size of the attr structure, for fwd/bwd compat. | 
 | 210 | 	 */ | 
 | 211 | 	__u32			size; | 
| Ingo Molnar | a21ca2c | 2009-06-06 09:58:57 +0200 | [diff] [blame] | 212 |  | 
 | 213 | 	/* | 
 | 214 | 	 * Type specific configuration information. | 
| Peter Zijlstra | f4a2deb | 2009-03-23 18:22:06 +0100 | [diff] [blame] | 215 | 	 */ | 
 | 216 | 	__u64			config; | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 217 |  | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 218 | 	union { | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 219 | 		__u64		sample_period; | 
 | 220 | 		__u64		sample_freq; | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 221 | 	}; | 
 | 222 |  | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 223 | 	__u64			sample_type; | 
 | 224 | 	__u64			read_format; | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 225 |  | 
| Paul Mackerras | 2743a5b | 2009-03-04 20:36:51 +1100 | [diff] [blame] | 226 | 	__u64			disabled       :  1, /* off by default        */ | 
| Paul Mackerras | 0475f9e | 2009-02-11 14:35:35 +1100 | [diff] [blame] | 227 | 				inherit	       :  1, /* children inherit it   */ | 
 | 228 | 				pinned	       :  1, /* must always be on PMU */ | 
 | 229 | 				exclusive      :  1, /* only group on PMU     */ | 
 | 230 | 				exclude_user   :  1, /* don't count user      */ | 
 | 231 | 				exclude_kernel :  1, /* ditto kernel          */ | 
 | 232 | 				exclude_hv     :  1, /* ditto hypervisor      */ | 
| Paul Mackerras | 2743a5b | 2009-03-04 20:36:51 +1100 | [diff] [blame] | 233 | 				exclude_idle   :  1, /* don't count when idle */ | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 234 | 				mmap           :  1, /* include mmap data     */ | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 235 | 				comm	       :  1, /* include comm data     */ | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 236 | 				freq           :  1, /* use freq, not period  */ | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 237 | 				inherit_stat   :  1, /* per task counts       */ | 
| Paul Mackerras | 57e7986 | 2009-06-30 16:07:19 +1000 | [diff] [blame] | 238 | 				enable_on_exec :  1, /* next exec enables     */ | 
| Peter Zijlstra | 9f498cc | 2009-07-23 14:46:33 +0200 | [diff] [blame] | 239 | 				task           :  1, /* trace fork/exit       */ | 
| Peter Zijlstra | 2667de8 | 2009-09-17 19:01:10 +0200 | [diff] [blame] | 240 | 				watermark      :  1, /* wakeup_watermark      */ | 
| Peter Zijlstra | ab60834 | 2010-04-08 23:03:20 +0200 | [diff] [blame] | 241 | 				/* | 
 | 242 | 				 * precise_ip: | 
 | 243 | 				 * | 
 | 244 | 				 *  0 - SAMPLE_IP can have arbitrary skid | 
 | 245 | 				 *  1 - SAMPLE_IP must have constant skid | 
 | 246 | 				 *  2 - SAMPLE_IP requested to have 0 skid | 
 | 247 | 				 *  3 - SAMPLE_IP must have 0 skid | 
 | 248 | 				 * | 
 | 249 | 				 *  See also PERF_RECORD_MISC_EXACT_IP | 
 | 250 | 				 */ | 
 | 251 | 				precise_ip     :  2, /* skid constraint       */ | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 252 | 				mmap_data      :  1, /* non-exec mmap data    */ | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 253 | 				sample_id_all  :  1, /* sample_type all events */ | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 254 |  | 
| Joerg Roedel | a240f76 | 2011-10-05 14:01:16 +0200 | [diff] [blame] | 255 | 				exclude_host   :  1, /* don't count in host   */ | 
 | 256 | 				exclude_guest  :  1, /* don't count in guest  */ | 
 | 257 |  | 
 | 258 | 				__reserved_1   : 43; | 
| Paul Mackerras | 2743a5b | 2009-03-04 20:36:51 +1100 | [diff] [blame] | 259 |  | 
| Peter Zijlstra | 2667de8 | 2009-09-17 19:01:10 +0200 | [diff] [blame] | 260 | 	union { | 
 | 261 | 		__u32		wakeup_events;	  /* wakeup every n events */ | 
 | 262 | 		__u32		wakeup_watermark; /* bytes before wakeup   */ | 
 | 263 | 	}; | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 264 |  | 
| Peter Zijlstra | f13c12c | 2009-12-15 19:43:11 +0100 | [diff] [blame] | 265 | 	__u32			bp_type; | 
| Andi Kleen | a7e3ed1 | 2011-03-03 10:34:47 +0800 | [diff] [blame] | 266 | 	union { | 
 | 267 | 		__u64		bp_addr; | 
 | 268 | 		__u64		config1; /* extension of config */ | 
 | 269 | 	}; | 
 | 270 | 	union { | 
 | 271 | 		__u64		bp_len; | 
 | 272 | 		__u64		config2; /* extension of config1 */ | 
 | 273 | 	}; | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 274 | 	__u64	branch_sample_type; /* enum branch_sample_type */ | 
| Thomas Gleixner | eab656a | 2008-12-08 19:26:59 +0100 | [diff] [blame] | 275 | }; | 
 | 276 |  | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 277 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 278 |  * Ioctls that can be done on a perf event fd: | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 279 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 280 | #define PERF_EVENT_IOC_ENABLE		_IO ('$', 0) | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 281 | #define PERF_EVENT_IOC_DISABLE		_IO ('$', 1) | 
 | 282 | #define PERF_EVENT_IOC_REFRESH		_IO ('$', 2) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 283 | #define PERF_EVENT_IOC_RESET		_IO ('$', 3) | 
| Arjan van de Ven | 4c49b12 | 2009-11-13 21:47:33 -0800 | [diff] [blame] | 284 | #define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 285 | #define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5) | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 286 | #define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *) | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 287 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 288 | enum perf_event_ioc_flags { | 
| Peter Zijlstra | 3df5eda | 2009-05-08 18:52:22 +0200 | [diff] [blame] | 289 | 	PERF_IOC_FLAG_GROUP		= 1U << 0, | 
 | 290 | }; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 291 |  | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 292 | /* | 
 | 293 |  * Structure of the page that can be mapped via mmap | 
 | 294 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 295 | struct perf_event_mmap_page { | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 296 | 	__u32	version;		/* version number of this structure */ | 
 | 297 | 	__u32	compat_version;		/* lowest version this is compat with */ | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 298 |  | 
 | 299 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 300 | 	 * Bits needed to read the hw events in user-space. | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 301 | 	 * | 
| Peter Zijlstra | c720620 | 2012-03-22 17:26:36 +0100 | [diff] [blame] | 302 | 	 *   u32 seq, time_mult, time_shift, idx, width; | 
 | 303 | 	 *   u64 count, enabled, running; | 
 | 304 | 	 *   u64 cyc, time_offset; | 
 | 305 | 	 *   s64 pmc = 0; | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 306 | 	 * | 
| Peter Zijlstra | a2e87d0 | 2009-04-06 11:44:59 +0200 | [diff] [blame] | 307 | 	 *   do { | 
 | 308 | 	 *     seq = pc->lock; | 
| Peter Zijlstra | a2e87d0 | 2009-04-06 11:44:59 +0200 | [diff] [blame] | 309 | 	 *     barrier() | 
| Peter Zijlstra | c720620 | 2012-03-22 17:26:36 +0100 | [diff] [blame] | 310 | 	 * | 
 | 311 | 	 *     enabled = pc->time_enabled; | 
 | 312 | 	 *     running = pc->time_running; | 
 | 313 | 	 * | 
 | 314 | 	 *     if (pc->cap_usr_time && enabled != running) { | 
 | 315 | 	 *       cyc = rdtsc(); | 
 | 316 | 	 *       time_offset = pc->time_offset; | 
 | 317 | 	 *       time_mult   = pc->time_mult; | 
 | 318 | 	 *       time_shift  = pc->time_shift; | 
 | 319 | 	 *     } | 
 | 320 | 	 * | 
 | 321 | 	 *     idx = pc->index; | 
 | 322 | 	 *     count = pc->offset; | 
 | 323 | 	 *     if (pc->cap_usr_rdpmc && idx) { | 
 | 324 | 	 *       width = pc->pmc_width; | 
 | 325 | 	 *       pmc = rdpmc(idx - 1); | 
 | 326 | 	 *     } | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 327 | 	 * | 
| Peter Zijlstra | a2e87d0 | 2009-04-06 11:44:59 +0200 | [diff] [blame] | 328 | 	 *     barrier(); | 
 | 329 | 	 *   } while (pc->lock != seq); | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 330 | 	 * | 
| Peter Zijlstra | 92f22a3 | 2009-04-02 11:12:04 +0200 | [diff] [blame] | 331 | 	 * NOTE: for obvious reason this only works on self-monitoring | 
 | 332 | 	 *       processes. | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 333 | 	 */ | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 334 | 	__u32	lock;			/* seqlock for synchronization */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 335 | 	__u32	index;			/* hardware event identifier */ | 
 | 336 | 	__s64	offset;			/* add to hardware event value */ | 
 | 337 | 	__u64	time_enabled;		/* time event active */ | 
 | 338 | 	__u64	time_running;		/* time event on cpu */ | 
| Peter Zijlstra | c720620 | 2012-03-22 17:26:36 +0100 | [diff] [blame] | 339 | 	union { | 
 | 340 | 		__u64	capabilities; | 
 | 341 | 		__u64	cap_usr_time  : 1, | 
 | 342 | 			cap_usr_rdpmc : 1, | 
 | 343 | 			cap_____res   : 62; | 
 | 344 | 	}; | 
 | 345 |  | 
 | 346 | 	/* | 
 | 347 | 	 * If cap_usr_rdpmc this field provides the bit-width of the value | 
 | 348 | 	 * read using the rdpmc() or equivalent instruction. This can be used | 
 | 349 | 	 * to sign extend the result like: | 
 | 350 | 	 * | 
 | 351 | 	 *   pmc <<= 64 - width; | 
 | 352 | 	 *   pmc >>= 64 - width; // signed shift right | 
 | 353 | 	 *   count += pmc; | 
 | 354 | 	 */ | 
 | 355 | 	__u16	pmc_width; | 
 | 356 |  | 
 | 357 | 	/* | 
 | 358 | 	 * If cap_usr_time the below fields can be used to compute the time | 
 | 359 | 	 * delta since time_enabled (in ns) using rdtsc or similar. | 
 | 360 | 	 * | 
 | 361 | 	 *   u64 quot, rem; | 
 | 362 | 	 *   u64 delta; | 
 | 363 | 	 * | 
 | 364 | 	 *   quot = (cyc >> time_shift); | 
 | 365 | 	 *   rem = cyc & ((1 << time_shift) - 1); | 
 | 366 | 	 *   delta = time_offset + quot * time_mult + | 
 | 367 | 	 *              ((rem * time_mult) >> time_shift); | 
 | 368 | 	 * | 
 | 369 | 	 * Where time_offset,time_mult,time_shift and cyc are read in the | 
 | 370 | 	 * seqcount loop described above. This delta can then be added to | 
 | 371 | 	 * enabled and possible running (if idx), improving the scaling: | 
 | 372 | 	 * | 
 | 373 | 	 *   enabled += delta; | 
 | 374 | 	 *   if (idx) | 
 | 375 | 	 *     running += delta; | 
 | 376 | 	 * | 
 | 377 | 	 *   quot = count / running; | 
 | 378 | 	 *   rem  = count % running; | 
 | 379 | 	 *   count = quot * enabled + (rem * enabled) / running; | 
 | 380 | 	 */ | 
 | 381 | 	__u16	time_shift; | 
 | 382 | 	__u32	time_mult; | 
| Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 383 | 	__u64	time_offset; | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 384 |  | 
| Peter Zijlstra | 41f9533 | 2009-06-23 17:55:18 +0200 | [diff] [blame] | 385 | 		/* | 
 | 386 | 		 * Hole for extension of the self monitor capabilities | 
 | 387 | 		 */ | 
 | 388 |  | 
| Peter Zijlstra | c720620 | 2012-03-22 17:26:36 +0100 | [diff] [blame] | 389 | 	__u64	__reserved[120];	/* align to 1k */ | 
| Peter Zijlstra | 41f9533 | 2009-06-23 17:55:18 +0200 | [diff] [blame] | 390 |  | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 391 | 	/* | 
 | 392 | 	 * Control data for the mmap() data buffer. | 
 | 393 | 	 * | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 394 | 	 * User-space reading the @data_head value should issue an rmb(), on | 
 | 395 | 	 * SMP capable platforms, after reading this value -- see | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 396 | 	 * perf_event_wakeup(). | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 397 | 	 * | 
 | 398 | 	 * When the mapping is PROT_WRITE the @data_tail value should be | 
 | 399 | 	 * written by userspace to reflect the last read data. In this case | 
 | 400 | 	 * the kernel will not over-write unread data. | 
| Peter Zijlstra | 38ff667 | 2009-03-30 19:07:03 +0200 | [diff] [blame] | 401 | 	 */ | 
| Peter Zijlstra | 8e3747c | 2009-06-02 16:16:02 +0200 | [diff] [blame] | 402 | 	__u64   data_head;		/* head in the data section */ | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 403 | 	__u64	data_tail;		/* user-space written tail */ | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 404 | }; | 
 | 405 |  | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 406 | #define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0) | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 407 | #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0) | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 408 | #define PERF_RECORD_MISC_KERNEL			(1 << 0) | 
 | 409 | #define PERF_RECORD_MISC_USER			(2 << 0) | 
 | 410 | #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0) | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 411 | #define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0) | 
 | 412 | #define PERF_RECORD_MISC_GUEST_USER		(5 << 0) | 
| Peter Zijlstra | 6fab019 | 2009-04-08 15:01:26 +0200 | [diff] [blame] | 413 |  | 
| Peter Zijlstra | ab60834 | 2010-04-08 23:03:20 +0200 | [diff] [blame] | 414 | /* | 
 | 415 |  * Indicates that the content of PERF_SAMPLE_IP points to | 
 | 416 |  * the actual instruction that triggered the event. See also | 
 | 417 |  * perf_event_attr::precise_ip. | 
 | 418 |  */ | 
 | 419 | #define PERF_RECORD_MISC_EXACT_IP		(1 << 14) | 
| Peter Zijlstra | ef21f68 | 2010-03-03 13:12:23 +0100 | [diff] [blame] | 420 | /* | 
 | 421 |  * Reserve the last bit to indicate some extended misc field | 
 | 422 |  */ | 
 | 423 | #define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15) | 
 | 424 |  | 
| Peter Zijlstra | 5c14819 | 2009-03-25 12:30:23 +0100 | [diff] [blame] | 425 | struct perf_event_header { | 
 | 426 | 	__u32	type; | 
| Peter Zijlstra | 6fab019 | 2009-04-08 15:01:26 +0200 | [diff] [blame] | 427 | 	__u16	misc; | 
 | 428 | 	__u16	size; | 
| Peter Zijlstra | 5c14819 | 2009-03-25 12:30:23 +0100 | [diff] [blame] | 429 | }; | 
 | 430 |  | 
 | 431 | enum perf_event_type { | 
| Peter Zijlstra | 5ed0041 | 2009-03-30 19:07:12 +0200 | [diff] [blame] | 432 |  | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 433 | 	/* | 
| Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 434 | 	 * If perf_event_attr.sample_id_all is set then all event types will | 
 | 435 | 	 * have the sample_type selected fields related to where/when | 
 | 436 | 	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID) | 
 | 437 | 	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after | 
 | 438 | 	 * the perf_event_header and the fields already present for the existing | 
 | 439 | 	 * fields, i.e. at the end of the payload. That way a newer perf.data | 
 | 440 | 	 * file will be supported by older perf tools, with these new optional | 
 | 441 | 	 * fields being ignored. | 
 | 442 | 	 * | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 443 | 	 * The MMAP events record the PROT_EXEC mappings so that we can | 
 | 444 | 	 * correlate userspace IPs to code. They have the following structure: | 
 | 445 | 	 * | 
 | 446 | 	 * struct { | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 447 | 	 *	struct perf_event_header	header; | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 448 | 	 * | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 449 | 	 *	u32				pid, tid; | 
 | 450 | 	 *	u64				addr; | 
 | 451 | 	 *	u64				len; | 
 | 452 | 	 *	u64				pgoff; | 
 | 453 | 	 *	char				filename[]; | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 454 | 	 * }; | 
 | 455 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 456 | 	PERF_RECORD_MMAP			= 1, | 
| Peter Zijlstra | ea5d20c | 2009-03-25 12:30:25 +0100 | [diff] [blame] | 457 |  | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 458 | 	/* | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 459 | 	 * struct { | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 460 | 	 *	struct perf_event_header	header; | 
 | 461 | 	 *	u64				id; | 
 | 462 | 	 *	u64				lost; | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 463 | 	 * }; | 
 | 464 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 465 | 	PERF_RECORD_LOST			= 2, | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 466 |  | 
 | 467 | 	/* | 
 | 468 | 	 * struct { | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 469 | 	 *	struct perf_event_header	header; | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 470 | 	 * | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 471 | 	 *	u32				pid, tid; | 
 | 472 | 	 *	char				comm[]; | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 473 | 	 * }; | 
 | 474 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 475 | 	PERF_RECORD_COMM			= 3, | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 476 |  | 
 | 477 | 	/* | 
| Peter Zijlstra | 26b119b | 2009-05-20 12:21:20 +0200 | [diff] [blame] | 478 | 	 * struct { | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 479 | 	 *	struct perf_event_header	header; | 
| Peter Zijlstra | 9f498cc | 2009-07-23 14:46:33 +0200 | [diff] [blame] | 480 | 	 *	u32				pid, ppid; | 
 | 481 | 	 *	u32				tid, ptid; | 
| Arjan van de Ven | 393b2ad | 2009-09-12 07:52:47 +0200 | [diff] [blame] | 482 | 	 *	u64				time; | 
| Peter Zijlstra | 9f498cc | 2009-07-23 14:46:33 +0200 | [diff] [blame] | 483 | 	 * }; | 
 | 484 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 485 | 	PERF_RECORD_EXIT			= 4, | 
| Peter Zijlstra | 9f498cc | 2009-07-23 14:46:33 +0200 | [diff] [blame] | 486 |  | 
 | 487 | 	/* | 
 | 488 | 	 * struct { | 
 | 489 | 	 *	struct perf_event_header	header; | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 490 | 	 *	u64				time; | 
| Peter Zijlstra | 689802b | 2009-06-05 15:05:43 +0200 | [diff] [blame] | 491 | 	 *	u64				id; | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 492 | 	 *	u64				stream_id; | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 493 | 	 * }; | 
 | 494 | 	 */ | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 495 | 	PERF_RECORD_THROTTLE			= 5, | 
 | 496 | 	PERF_RECORD_UNTHROTTLE			= 6, | 
| Peter Zijlstra | a78ac32 | 2009-05-25 17:39:05 +0200 | [diff] [blame] | 497 |  | 
 | 498 | 	/* | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 499 | 	 * struct { | 
| Ingo Molnar | a21ca2c | 2009-06-06 09:58:57 +0200 | [diff] [blame] | 500 | 	 *	struct perf_event_header	header; | 
 | 501 | 	 *	u32				pid, ppid; | 
| Peter Zijlstra | 9f498cc | 2009-07-23 14:46:33 +0200 | [diff] [blame] | 502 | 	 *	u32				tid, ptid; | 
| Anton Blanchard | a6f10a2 | 2009-09-22 22:34:24 +1000 | [diff] [blame] | 503 | 	 *	u64				time; | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 504 | 	 * }; | 
 | 505 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 506 | 	PERF_RECORD_FORK			= 7, | 
| Peter Zijlstra | 60313eb | 2009-06-04 16:53:44 +0200 | [diff] [blame] | 507 |  | 
 | 508 | 	/* | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 509 | 	 * struct { | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 510 | 	 *	struct perf_event_header	header; | 
 | 511 | 	 *	u32				pid, tid; | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 512 | 	 * | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 513 | 	 *	struct read_format		values; | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 514 | 	 * }; | 
 | 515 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 516 | 	PERF_RECORD_READ			= 8, | 
| Peter Zijlstra | 38b200d | 2009-06-23 20:13:11 +0200 | [diff] [blame] | 517 |  | 
 | 518 | 	/* | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 519 | 	 * struct { | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 520 | 	 *	struct perf_event_header	header; | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 521 | 	 * | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 522 | 	 *	{ u64			ip;	  } && PERF_SAMPLE_IP | 
 | 523 | 	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID | 
 | 524 | 	 *	{ u64			time;     } && PERF_SAMPLE_TIME | 
 | 525 | 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 526 | 	 *	{ u64			id;	  } && PERF_SAMPLE_ID | 
| Peter Zijlstra | 7f453c2 | 2009-07-21 13:19:40 +0200 | [diff] [blame] | 527 | 	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 528 | 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 529 | 	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 530 | 	 * | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 531 | 	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 532 | 	 * | 
| Peter Zijlstra | f9188e0 | 2009-06-18 22:20:52 +0200 | [diff] [blame] | 533 | 	 *	{ u64			nr, | 
| Peter Zijlstra | 43a21ea | 2009-03-25 19:39:37 +0100 | [diff] [blame] | 534 | 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 535 | 	 * | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 536 | 	 *	# | 
 | 537 | 	 *	# The RAW record below is opaque data wrt the ABI | 
 | 538 | 	 *	# | 
 | 539 | 	 *	# That is, the ABI doesn't make any promises wrt to | 
 | 540 | 	 *	# the stability of its content, it may vary depending | 
 | 541 | 	 *	# on event, hardware, kernel version and phase of | 
 | 542 | 	 *	# the moon. | 
 | 543 | 	 *	# | 
 | 544 | 	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI. | 
 | 545 | 	 *	# | 
| Peter Zijlstra | 3dab77f | 2009-08-13 11:47:53 +0200 | [diff] [blame] | 546 | 	 * | 
| Peter Zijlstra | a044560 | 2009-08-10 11:16:52 +0200 | [diff] [blame] | 547 | 	 *	{ u32			size; | 
 | 548 | 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 549 | 	 * | 
 | 550 | 	 *	{ u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK | 
| Peter Zijlstra | 0c593b3 | 2009-04-06 11:45:08 +0200 | [diff] [blame] | 551 | 	 * }; | 
| Peter Zijlstra | 8a057d8 | 2009-04-02 11:11:59 +0200 | [diff] [blame] | 552 | 	 */ | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 553 | 	PERF_RECORD_SAMPLE			= 9, | 
| Peter Zijlstra | e6e18ec | 2009-06-25 11:27:12 +0200 | [diff] [blame] | 554 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 555 | 	PERF_RECORD_MAX,			/* non-ABI */ | 
| Peter Zijlstra | 5c14819 | 2009-03-25 12:30:23 +0100 | [diff] [blame] | 556 | }; | 
 | 557 |  | 
| Peter Zijlstra | f9188e0 | 2009-06-18 22:20:52 +0200 | [diff] [blame] | 558 | enum perf_callchain_context { | 
 | 559 | 	PERF_CONTEXT_HV			= (__u64)-32, | 
 | 560 | 	PERF_CONTEXT_KERNEL		= (__u64)-128, | 
 | 561 | 	PERF_CONTEXT_USER		= (__u64)-512, | 
| Ingo Molnar | 7522060 | 2009-06-18 08:00:17 +0200 | [diff] [blame] | 562 |  | 
| Peter Zijlstra | f9188e0 | 2009-06-18 22:20:52 +0200 | [diff] [blame] | 563 | 	PERF_CONTEXT_GUEST		= (__u64)-2048, | 
 | 564 | 	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176, | 
 | 565 | 	PERF_CONTEXT_GUEST_USER		= (__u64)-2560, | 
 | 566 |  | 
 | 567 | 	PERF_CONTEXT_MAX		= (__u64)-4095, | 
| Ingo Molnar | 7522060 | 2009-06-18 08:00:17 +0200 | [diff] [blame] | 568 | }; | 
 | 569 |  | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 570 | #define PERF_FLAG_FD_NO_GROUP		(1U << 0) | 
 | 571 | #define PERF_FLAG_FD_OUTPUT		(1U << 1) | 
 | 572 | #define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */ | 
| Peter Zijlstra | a4be7c2 | 2009-08-19 11:18:27 +0200 | [diff] [blame] | 573 |  | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 574 | #ifdef __KERNEL__ | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 575 | /* | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 576 |  * Kernel-internal data types and definitions: | 
| Ingo Molnar | 9f66a38 | 2008-12-10 12:33:23 +0100 | [diff] [blame] | 577 |  */ | 
 | 578 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 579 | #ifdef CONFIG_PERF_EVENTS | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 580 | # include <linux/cgroup.h> | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 581 | # include <asm/perf_event.h> | 
| Peter Zijlstra | 7be7923 | 2010-06-09 11:57:23 +0200 | [diff] [blame] | 582 | # include <asm/local64.h> | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 583 | #endif | 
 | 584 |  | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 585 | struct perf_guest_info_callbacks { | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 586 | 	int				(*is_in_guest)(void); | 
 | 587 | 	int				(*is_user_mode)(void); | 
 | 588 | 	unsigned long			(*get_guest_ip)(void); | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 589 | }; | 
 | 590 |  | 
| Arnd Bergmann | 2ff6cfd | 2009-12-07 17:12:58 +0100 | [diff] [blame] | 591 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 
 | 592 | #include <asm/hw_breakpoint.h> | 
 | 593 | #endif | 
 | 594 |  | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 595 | #include <linux/list.h> | 
 | 596 | #include <linux/mutex.h> | 
 | 597 | #include <linux/rculist.h> | 
 | 598 | #include <linux/rcupdate.h> | 
 | 599 | #include <linux/spinlock.h> | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 600 | #include <linux/hrtimer.h> | 
| Peter Zijlstra | 3c446b3 | 2009-04-06 11:45:01 +0200 | [diff] [blame] | 601 | #include <linux/fs.h> | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 602 | #include <linux/pid_namespace.h> | 
| Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 603 | #include <linux/workqueue.h> | 
| Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 604 | #include <linux/ftrace.h> | 
| Peter Zijlstra | 85cfabb | 2010-03-11 13:06:56 +0100 | [diff] [blame] | 605 | #include <linux/cpu.h> | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 606 | #include <linux/irq_work.h> | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 607 | #include <linux/static_key.h> | 
| Arun Sharma | 6006349 | 2011-07-26 16:09:06 -0700 | [diff] [blame] | 608 | #include <linux/atomic.h> | 
| Jiri Olsa | 641cc93 | 2012-03-15 20:09:14 +0100 | [diff] [blame] | 609 | #include <linux/sysfs.h> | 
| Peter Zijlstra | fa58815 | 2010-05-18 10:54:20 +0200 | [diff] [blame] | 610 | #include <asm/local.h> | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 611 |  | 
| Peter Zijlstra | f9188e0 | 2009-06-18 22:20:52 +0200 | [diff] [blame] | 612 | #define PERF_MAX_STACK_DEPTH		255 | 
 | 613 |  | 
 | 614 | struct perf_callchain_entry { | 
 | 615 | 	__u64				nr; | 
 | 616 | 	__u64				ip[PERF_MAX_STACK_DEPTH]; | 
 | 617 | }; | 
 | 618 |  | 
| Frederic Weisbecker | 3a43ce6 | 2009-08-08 04:26:37 +0200 | [diff] [blame] | 619 | struct perf_raw_record { | 
 | 620 | 	u32				size; | 
 | 621 | 	void				*data; | 
| Frederic Weisbecker | f413cdb | 2009-08-07 01:25:54 +0200 | [diff] [blame] | 622 | }; | 
 | 623 |  | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 624 | /* | 
 | 625 |  * single taken branch record layout: | 
 | 626 |  * | 
 | 627 |  *      from: source instruction (may not always be a branch insn) | 
 | 628 |  *        to: branch target | 
 | 629 |  *   mispred: branch target was mispredicted | 
 | 630 |  * predicted: branch target was predicted | 
 | 631 |  * | 
 | 632 |  * support for mispred, predicted is optional. In case it | 
 | 633 |  * is not supported mispred = predicted = 0. | 
 | 634 |  */ | 
| Peter Zijlstra | caff2be | 2010-03-03 12:02:30 +0100 | [diff] [blame] | 635 | struct perf_branch_entry { | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 636 | 	__u64	from; | 
 | 637 | 	__u64	to; | 
 | 638 | 	__u64	mispred:1,  /* target mispredicted */ | 
 | 639 | 		predicted:1,/* target predicted */ | 
 | 640 | 		reserved:62; | 
| Peter Zijlstra | caff2be | 2010-03-03 12:02:30 +0100 | [diff] [blame] | 641 | }; | 
 | 642 |  | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 643 | /* | 
 | 644 |  * branch stack layout: | 
 | 645 |  *  nr: number of taken branches stored in entries[] | 
 | 646 |  * | 
 | 647 |  * Note that nr can vary from sample to sample | 
 | 648 |  * branches (to, from) are stored from most recent | 
 | 649 |  * to least recent, i.e., entries[0] contains the most | 
 | 650 |  * recent branch. | 
 | 651 |  */ | 
| Peter Zijlstra | caff2be | 2010-03-03 12:02:30 +0100 | [diff] [blame] | 652 | struct perf_branch_stack { | 
 | 653 | 	__u64				nr; | 
 | 654 | 	struct perf_branch_entry	entries[0]; | 
 | 655 | }; | 
 | 656 |  | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 657 | struct task_struct; | 
 | 658 |  | 
| Stephane Eranian | efc9f05 | 2011-06-06 16:57:03 +0200 | [diff] [blame] | 659 | /* | 
 | 660 |  * extra PMU register associated with an event | 
 | 661 |  */ | 
 | 662 | struct hw_perf_event_extra { | 
 | 663 | 	u64		config;	/* register value */ | 
 | 664 | 	unsigned int	reg;	/* register address or index */ | 
 | 665 | 	int		alloc;	/* extra register already allocated */ | 
 | 666 | 	int		idx;	/* index in shared_regs->regs[] */ | 
 | 667 | }; | 
 | 668 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 669 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 670 |  * struct hw_perf_event - performance event hardware details: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 671 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 672 | struct hw_perf_event { | 
 | 673 | #ifdef CONFIG_PERF_EVENTS | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 674 | 	union { | 
 | 675 | 		struct { /* hardware */ | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 676 | 			u64		config; | 
| Stephane Eranian | 447a194 | 2010-02-01 14:50:01 +0200 | [diff] [blame] | 677 | 			u64		last_tag; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 678 | 			unsigned long	config_base; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 679 | 			unsigned long	event_base; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 680 | 			int		idx; | 
| Stephane Eranian | 447a194 | 2010-02-01 14:50:01 +0200 | [diff] [blame] | 681 | 			int		last_cpu; | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 682 |  | 
| Stephane Eranian | efc9f05 | 2011-06-06 16:57:03 +0200 | [diff] [blame] | 683 | 			struct hw_perf_event_extra extra_reg; | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 684 | 			struct hw_perf_event_extra branch_reg; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 685 | 		}; | 
| Soeren Sandmann | 721a669 | 2009-09-15 14:33:08 +0200 | [diff] [blame] | 686 | 		struct { /* software */ | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 687 | 			struct hrtimer	hrtimer; | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 688 | 		}; | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 689 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 
| Frederic Weisbecker | 45a7337 | 2010-06-23 23:00:37 +0200 | [diff] [blame] | 690 | 		struct { /* breakpoint */ | 
 | 691 | 			struct arch_hw_breakpoint	info; | 
 | 692 | 			struct list_head		bp_list; | 
| Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 693 | 			/* | 
 | 694 | 			 * Crufty hack to avoid the chicken and egg | 
 | 695 | 			 * problem hw_breakpoint has with context | 
 | 696 | 			 * creation and event initalization. | 
 | 697 | 			 */ | 
 | 698 | 			struct task_struct		*bp_target; | 
| Frederic Weisbecker | 45a7337 | 2010-06-23 23:00:37 +0200 | [diff] [blame] | 699 | 		}; | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 700 | #endif | 
| Peter Zijlstra | d6d020e | 2009-03-13 12:21:35 +0100 | [diff] [blame] | 701 | 	}; | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 702 | 	int				state; | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 703 | 	local64_t			prev_count; | 
| Peter Zijlstra | b23f332 | 2009-06-02 15:13:03 +0200 | [diff] [blame] | 704 | 	u64				sample_period; | 
| Peter Zijlstra | 9e350de | 2009-06-10 21:34:59 +0200 | [diff] [blame] | 705 | 	u64				last_period; | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 706 | 	local64_t			period_left; | 
| Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 707 | 	u64                             interrupts_seq; | 
| Peter Zijlstra | 60db5e0 | 2009-05-15 15:19:28 +0200 | [diff] [blame] | 708 | 	u64				interrupts; | 
| Peter Zijlstra | 6a24ed6c | 2009-06-05 18:01:29 +0200 | [diff] [blame] | 709 |  | 
| Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 710 | 	u64				freq_time_stamp; | 
 | 711 | 	u64				freq_count_stamp; | 
| Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 712 | #endif | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 713 | }; | 
 | 714 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 715 | /* | 
 | 716 |  * hw_perf_event::state flags | 
 | 717 |  */ | 
 | 718 | #define PERF_HES_STOPPED	0x01 /* the counter is stopped */ | 
 | 719 | #define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */ | 
 | 720 | #define PERF_HES_ARCH		0x04 | 
 | 721 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 722 | struct perf_event; | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 723 |  | 
| Peter Zijlstra | 8d2cacb | 2010-05-25 17:49:05 +0200 | [diff] [blame] | 724 | /* | 
 | 725 |  * Common implementation detail of pmu::{start,commit,cancel}_txn | 
 | 726 |  */ | 
 | 727 | #define PERF_EVENT_TXN 0x1 | 
| Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 728 |  | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 729 | /** | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 730 |  * struct pmu - generic performance monitoring unit | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 731 |  */ | 
| Robert Richter | 4aeb0b4 | 2009-04-29 12:47:03 +0200 | [diff] [blame] | 732 | struct pmu { | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 733 | 	struct list_head		entry; | 
 | 734 |  | 
| Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 735 | 	struct device			*dev; | 
| Peter Zijlstra | 0c9d42e | 2011-11-20 23:30:47 +0100 | [diff] [blame] | 736 | 	const struct attribute_group	**attr_groups; | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 737 | 	char				*name; | 
 | 738 | 	int				type; | 
 | 739 |  | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 740 | 	int * __percpu			pmu_disable_count; | 
 | 741 | 	struct perf_cpu_context * __percpu pmu_cpu_context; | 
| Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 742 | 	int				task_ctx_nr; | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 743 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 744 | 	/* | 
 | 745 | 	 * Fully disable/enable this PMU, can be used to protect from the PMI | 
 | 746 | 	 * as well as for lazy/batch writing of the MSRs. | 
 | 747 | 	 */ | 
| Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 748 | 	void (*pmu_enable)		(struct pmu *pmu); /* optional */ | 
 | 749 | 	void (*pmu_disable)		(struct pmu *pmu); /* optional */ | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 750 |  | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 751 | 	/* | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 752 | 	 * Try and initialize the event for this PMU. | 
| Peter Zijlstra | 24cd7f5 | 2010-06-11 17:32:03 +0200 | [diff] [blame] | 753 | 	 * Should return -ENOENT when the @event doesn't match this PMU. | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 754 | 	 */ | 
 | 755 | 	int (*event_init)		(struct perf_event *event); | 
 | 756 |  | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 757 | #define PERF_EF_START	0x01		/* start the counter when adding    */ | 
 | 758 | #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */ | 
 | 759 | #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */ | 
 | 760 |  | 
 | 761 | 	/* | 
 | 762 | 	 * Adds/Removes a counter to/from the PMU, can be done inside | 
 | 763 | 	 * a transaction, see the ->*_txn() methods. | 
 | 764 | 	 */ | 
 | 765 | 	int  (*add)			(struct perf_event *event, int flags); | 
 | 766 | 	void (*del)			(struct perf_event *event, int flags); | 
 | 767 |  | 
 | 768 | 	/* | 
 | 769 | 	 * Starts/Stops a counter present on the PMU. The PMI handler | 
 | 770 | 	 * should stop the counter when perf_event_overflow() returns | 
 | 771 | 	 * !0. ->start() will be used to continue. | 
 | 772 | 	 */ | 
 | 773 | 	void (*start)			(struct perf_event *event, int flags); | 
 | 774 | 	void (*stop)			(struct perf_event *event, int flags); | 
 | 775 |  | 
 | 776 | 	/* | 
 | 777 | 	 * Updates the counter value of the event. | 
 | 778 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 779 | 	void (*read)			(struct perf_event *event); | 
| Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 780 |  | 
 | 781 | 	/* | 
| Peter Zijlstra | 24cd7f5 | 2010-06-11 17:32:03 +0200 | [diff] [blame] | 782 | 	 * Group events scheduling is treated as a transaction, add | 
 | 783 | 	 * group events as a whole and perform one schedulability test. | 
 | 784 | 	 * If the test fails, roll back the whole group | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 785 | 	 * | 
 | 786 | 	 * Start the transaction, after this ->add() doesn't need to | 
| Peter Zijlstra | 24cd7f5 | 2010-06-11 17:32:03 +0200 | [diff] [blame] | 787 | 	 * do schedulability tests. | 
| Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 788 | 	 */ | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 789 | 	void (*start_txn)		(struct pmu *pmu); /* optional */ | 
| Peter Zijlstra | 8d2cacb | 2010-05-25 17:49:05 +0200 | [diff] [blame] | 790 | 	/* | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 791 | 	 * If ->start_txn() disabled the ->add() schedulability test | 
| Peter Zijlstra | 8d2cacb | 2010-05-25 17:49:05 +0200 | [diff] [blame] | 792 | 	 * then ->commit_txn() is required to perform one. On success | 
 | 793 | 	 * the transaction is closed. On error the transaction is kept | 
 | 794 | 	 * open until ->cancel_txn() is called. | 
 | 795 | 	 */ | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 796 | 	int  (*commit_txn)		(struct pmu *pmu); /* optional */ | 
| Peter Zijlstra | 8d2cacb | 2010-05-25 17:49:05 +0200 | [diff] [blame] | 797 | 	/* | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 798 | 	 * Will cancel the transaction, assumes ->del() is called | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 799 | 	 * for each successful ->add() during the transaction. | 
| Peter Zijlstra | 8d2cacb | 2010-05-25 17:49:05 +0200 | [diff] [blame] | 800 | 	 */ | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 801 | 	void (*cancel_txn)		(struct pmu *pmu); /* optional */ | 
| Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 802 |  | 
 | 803 | 	/* | 
 | 804 | 	 * Will return the value for perf_event_mmap_page::index for this event, | 
 | 805 | 	 * if no implementation is provided it will default to: event->hw.idx + 1. | 
 | 806 | 	 */ | 
 | 807 | 	int (*event_idx)		(struct perf_event *event); /*optional */ | 
| Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 808 |  | 
 | 809 | 	/* | 
 | 810 | 	 * flush branch stack on context-switches (needed in cpu-wide mode) | 
 | 811 | 	 */ | 
 | 812 | 	void (*flush_branch_stack)	(void); | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 813 | }; | 
 | 814 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 815 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 816 |  * enum perf_event_active_state - the states of a event | 
| Ingo Molnar | 6a93070 | 2008-12-11 15:17:03 +0100 | [diff] [blame] | 817 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 818 | enum perf_event_active_state { | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 819 | 	PERF_EVENT_STATE_ERROR		= -2, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 820 | 	PERF_EVENT_STATE_OFF		= -1, | 
 | 821 | 	PERF_EVENT_STATE_INACTIVE	=  0, | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 822 | 	PERF_EVENT_STATE_ACTIVE		=  1, | 
| Ingo Molnar | 6a93070 | 2008-12-11 15:17:03 +0100 | [diff] [blame] | 823 | }; | 
 | 824 |  | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 825 | struct file; | 
| Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 826 | struct perf_sample_data; | 
 | 827 |  | 
| Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 828 | typedef void (*perf_overflow_handler_t)(struct perf_event *, | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 829 | 					struct perf_sample_data *, | 
 | 830 | 					struct pt_regs *regs); | 
 | 831 |  | 
| Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 832 | enum perf_group_flag { | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 833 | 	PERF_GROUP_SOFTWARE		= 0x1, | 
| Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 834 | }; | 
 | 835 |  | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 836 | #define SWEVENT_HLIST_BITS		8 | 
 | 837 | #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS) | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 838 |  | 
 | 839 | struct swevent_hlist { | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 840 | 	struct hlist_head		heads[SWEVENT_HLIST_SIZE]; | 
 | 841 | 	struct rcu_head			rcu_head; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 842 | }; | 
 | 843 |  | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 844 | #define PERF_ATTACH_CONTEXT	0x01 | 
 | 845 | #define PERF_ATTACH_GROUP	0x02 | 
| Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 846 | #define PERF_ATTACH_TASK	0x04 | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 847 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 848 | #ifdef CONFIG_CGROUP_PERF | 
 | 849 | /* | 
 | 850 |  * perf_cgroup_info keeps track of time_enabled for a cgroup. | 
 | 851 |  * This is a per-cpu dynamically allocated data structure. | 
 | 852 |  */ | 
 | 853 | struct perf_cgroup_info { | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 854 | 	u64				time; | 
 | 855 | 	u64				timestamp; | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 856 | }; | 
 | 857 |  | 
 | 858 | struct perf_cgroup { | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 859 | 	struct				cgroup_subsys_state css; | 
 | 860 | 	struct				perf_cgroup_info *info;	/* timing info, one per cpu */ | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 861 | }; | 
 | 862 | #endif | 
 | 863 |  | 
| Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 864 | struct ring_buffer; | 
 | 865 |  | 
| Ingo Molnar | 6a93070 | 2008-12-11 15:17:03 +0100 | [diff] [blame] | 866 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 867 |  * struct perf_event - performance event kernel representation: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 868 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 869 | struct perf_event { | 
 | 870 | #ifdef CONFIG_PERF_EVENTS | 
| Ingo Molnar | 65abc86 | 2009-09-21 10:18:27 +0200 | [diff] [blame] | 871 | 	struct list_head		group_entry; | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 872 | 	struct list_head		event_entry; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 873 | 	struct list_head		sibling_list; | 
| Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 874 | 	struct hlist_node		hlist_entry; | 
| Ingo Molnar | 0127c3e | 2009-05-25 22:03:26 +0200 | [diff] [blame] | 875 | 	int				nr_siblings; | 
| Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 876 | 	int				group_flags; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 877 | 	struct perf_event		*group_leader; | 
| Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 878 | 	struct pmu			*pmu; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 879 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 880 | 	enum perf_event_active_state	state; | 
| Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 881 | 	unsigned int			attach_state; | 
| Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 882 | 	local64_t			count; | 
| Peter Zijlstra | a6e6dea | 2010-05-21 14:27:58 +0200 | [diff] [blame] | 883 | 	atomic64_t			child_count; | 
| Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 884 |  | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 885 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 886 | 	 * These are the total time in nanoseconds that the event | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 887 | 	 * has been enabled (i.e. eligible to run, and the task has | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 888 | 	 * been scheduled in, if this is a per-task event) | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 889 | 	 * and running (scheduled onto the CPU), respectively. | 
 | 890 | 	 * | 
 | 891 | 	 * They are computed from tstamp_enabled, tstamp_running and | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 892 | 	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 893 | 	 */ | 
 | 894 | 	u64				total_time_enabled; | 
 | 895 | 	u64				total_time_running; | 
 | 896 |  | 
 | 897 | 	/* | 
 | 898 | 	 * These are timestamps used for computing total_time_enabled | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 899 | 	 * and total_time_running when the event is in INACTIVE or | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 900 | 	 * ACTIVE state, measured in nanoseconds from an arbitrary point | 
 | 901 | 	 * in time. | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 902 | 	 * tstamp_enabled: the notional time when the event was enabled | 
 | 903 | 	 * tstamp_running: the notional time when the event was scheduled on | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 904 | 	 * tstamp_stopped: in INACTIVE state, the notional time when the | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 905 | 	 *	event was scheduled off. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 906 | 	 */ | 
 | 907 | 	u64				tstamp_enabled; | 
 | 908 | 	u64				tstamp_running; | 
 | 909 | 	u64				tstamp_stopped; | 
 | 910 |  | 
| Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 911 | 	/* | 
 | 912 | 	 * timestamp shadows the actual context timing but it can | 
 | 913 | 	 * be safely used in NMI interrupt context. It reflects the | 
 | 914 | 	 * context time as it was when the event was last scheduled in. | 
 | 915 | 	 * | 
 | 916 | 	 * ctx_time already accounts for ctx->timestamp. Therefore to | 
 | 917 | 	 * compute ctx_time for a sample, simply add perf_clock(). | 
 | 918 | 	 */ | 
 | 919 | 	u64				shadow_ctx_time; | 
 | 920 |  | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 921 | 	struct perf_event_attr		attr; | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 922 | 	u16				header_size; | 
| Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 923 | 	u16				id_header_size; | 
| Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 924 | 	u16				read_size; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 925 | 	struct hw_perf_event		hw; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 926 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 927 | 	struct perf_event_context	*ctx; | 
| Ingo Molnar | 9b51f66 | 2008-12-12 13:49:45 +0100 | [diff] [blame] | 928 | 	struct file			*filp; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 929 |  | 
 | 930 | 	/* | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 931 | 	 * These accumulate total time (in nanoseconds) that children | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 932 | 	 * events have been enabled and running, respectively. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 933 | 	 */ | 
 | 934 | 	atomic64_t			child_total_time_enabled; | 
 | 935 | 	atomic64_t			child_total_time_running; | 
 | 936 |  | 
 | 937 | 	/* | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 938 | 	 * Protect attach/detach and child_list: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 939 | 	 */ | 
| Peter Zijlstra | fccc714 | 2009-05-23 18:28:56 +0200 | [diff] [blame] | 940 | 	struct mutex			child_mutex; | 
 | 941 | 	struct list_head		child_list; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 942 | 	struct perf_event		*parent; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 943 |  | 
 | 944 | 	int				oncpu; | 
 | 945 | 	int				cpu; | 
 | 946 |  | 
| Peter Zijlstra | 082ff5a | 2009-05-23 18:29:00 +0200 | [diff] [blame] | 947 | 	struct list_head		owner_entry; | 
 | 948 | 	struct task_struct		*owner; | 
 | 949 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 950 | 	/* mmap bits */ | 
 | 951 | 	struct mutex			mmap_mutex; | 
 | 952 | 	atomic_t			mmap_count; | 
| Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 953 | 	int				mmap_locked; | 
 | 954 | 	struct user_struct		*mmap_user; | 
| Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 955 | 	struct ring_buffer		*rb; | 
| Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 956 | 	struct list_head		rb_entry; | 
| Paul Mackerras | 37d8182 | 2009-03-23 18:22:08 +0100 | [diff] [blame] | 957 |  | 
| Peter Zijlstra | 7b732a7 | 2009-03-23 18:22:10 +0100 | [diff] [blame] | 958 | 	/* poll related */ | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 959 | 	wait_queue_head_t		waitq; | 
| Peter Zijlstra | 3c446b3 | 2009-04-06 11:45:01 +0200 | [diff] [blame] | 960 | 	struct fasync_struct		*fasync; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 961 |  | 
 | 962 | 	/* delayed work for NMIs and such */ | 
 | 963 | 	int				pending_wakeup; | 
| Peter Zijlstra | 4c9e254 | 2009-04-06 11:45:09 +0200 | [diff] [blame] | 964 | 	int				pending_kill; | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 965 | 	int				pending_disable; | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 966 | 	struct irq_work			pending; | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 967 |  | 
| Peter Zijlstra | 79f1464 | 2009-04-06 11:45:07 +0200 | [diff] [blame] | 968 | 	atomic_t			event_limit; | 
 | 969 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 970 | 	void (*destroy)(struct perf_event *); | 
| Peter Zijlstra | 592903c | 2009-03-13 12:21:36 +0100 | [diff] [blame] | 971 | 	struct rcu_head			rcu_head; | 
| Peter Zijlstra | 709e50c | 2009-06-02 14:13:15 +0200 | [diff] [blame] | 972 |  | 
 | 973 | 	struct pid_namespace		*ns; | 
| Peter Zijlstra | 8e5799b | 2009-06-02 15:08:15 +0200 | [diff] [blame] | 974 | 	u64				id; | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 975 |  | 
| Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 976 | 	perf_overflow_handler_t		overflow_handler; | 
| Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 977 | 	void				*overflow_handler_context; | 
| Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 978 |  | 
| Li Zefan | 07b139c | 2009-12-21 14:27:35 +0800 | [diff] [blame] | 979 | #ifdef CONFIG_EVENT_TRACING | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 980 | 	struct ftrace_event_call	*tp_event; | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 981 | 	struct event_filter		*filter; | 
| Jiri Olsa | ced3900 | 2012-02-15 15:51:52 +0100 | [diff] [blame] | 982 | #ifdef CONFIG_FUNCTION_TRACER | 
 | 983 | 	struct ftrace_ops               ftrace_ops; | 
 | 984 | #endif | 
| Ingo Molnar | ee06094 | 2008-12-13 09:00:03 +0100 | [diff] [blame] | 985 | #endif | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 986 |  | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 987 | #ifdef CONFIG_CGROUP_PERF | 
 | 988 | 	struct perf_cgroup		*cgrp; /* cgroup event is attach to */ | 
 | 989 | 	int				cgrp_defer_enabled; | 
 | 990 | #endif | 
 | 991 |  | 
| Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 992 | #endif /* CONFIG_PERF_EVENTS */ | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 993 | }; | 
 | 994 |  | 
| Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 995 | enum perf_event_context_type { | 
 | 996 | 	task_context, | 
 | 997 | 	cpu_context, | 
 | 998 | }; | 
 | 999 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1000 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1001 |  * struct perf_event_context - event context structure | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1002 |  * | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1003 |  * Used as a container for task events and CPU events as well: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1004 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1005 | struct perf_event_context { | 
| Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 1006 | 	struct pmu			*pmu; | 
| Richard Kennedy | ee643c4 | 2011-03-07 15:46:59 +0000 | [diff] [blame] | 1007 | 	enum perf_event_context_type	type; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1008 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1009 | 	 * Protect the states of the events in the list, | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1010 | 	 * nr_active, and the list: | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1011 | 	 */ | 
| Thomas Gleixner | e625cce | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1012 | 	raw_spinlock_t			lock; | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1013 | 	/* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1014 | 	 * Protect the list of events.  Locking either mutex or lock | 
| Paul Mackerras | d859e29 | 2009-01-17 18:10:22 +1100 | [diff] [blame] | 1015 | 	 * is sufficient to ensure the list doesn't change; to change | 
 | 1016 | 	 * the list you need to lock both the mutex and the spinlock. | 
 | 1017 | 	 */ | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1018 | 	struct mutex			mutex; | 
| Ingo Molnar | 04289bb | 2008-12-11 08:38:42 +0100 | [diff] [blame] | 1019 |  | 
| Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 1020 | 	struct list_head		pinned_groups; | 
 | 1021 | 	struct list_head		flexible_groups; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1022 | 	struct list_head		event_list; | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1023 | 	int				nr_events; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1024 | 	int				nr_active; | 
 | 1025 | 	int				is_active; | 
| Peter Zijlstra | bfbd338 | 2009-06-24 21:11:59 +0200 | [diff] [blame] | 1026 | 	int				nr_stat; | 
| Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 1027 | 	int				nr_freq; | 
| Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 1028 | 	int				rotate_disable; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1029 | 	atomic_t			refcount; | 
 | 1030 | 	struct task_struct		*task; | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1031 |  | 
 | 1032 | 	/* | 
| Peter Zijlstra | 4af4998 | 2009-04-06 11:45:10 +0200 | [diff] [blame] | 1033 | 	 * Context clock, runs when context enabled. | 
| Paul Mackerras | 53cfbf5 | 2009-03-25 22:46:58 +1100 | [diff] [blame] | 1034 | 	 */ | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1035 | 	u64				time; | 
 | 1036 | 	u64				timestamp; | 
| Paul Mackerras | 564c2b2 | 2009-05-22 14:27:22 +1000 | [diff] [blame] | 1037 |  | 
 | 1038 | 	/* | 
 | 1039 | 	 * These fields let us detect when two contexts have both | 
 | 1040 | 	 * been cloned (inherited) from a common ancestor. | 
 | 1041 | 	 */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1042 | 	struct perf_event_context	*parent_ctx; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1043 | 	u64				parent_gen; | 
 | 1044 | 	u64				generation; | 
 | 1045 | 	int				pin_count; | 
| Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 1046 | 	int				nr_cgroups;	 /* cgroup evts */ | 
 | 1047 | 	int				nr_branch_stack; /* branch_stack evt */ | 
| Richard Kennedy | 28009ce | 2011-06-07 16:33:38 +0100 | [diff] [blame] | 1048 | 	struct rcu_head			rcu_head; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1049 | }; | 
 | 1050 |  | 
| Frederic Weisbecker | 7ae07ea | 2010-08-14 20:45:13 +0200 | [diff] [blame] | 1051 | /* | 
 | 1052 |  * Number of contexts where an event can trigger: | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1053 |  *	task, softirq, hardirq, nmi. | 
| Frederic Weisbecker | 7ae07ea | 2010-08-14 20:45:13 +0200 | [diff] [blame] | 1054 |  */ | 
 | 1055 | #define PERF_NR_CONTEXTS	4 | 
 | 1056 |  | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1057 | /** | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1058 |  * struct perf_event_cpu_context - per cpu event context structure | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1059 |  */ | 
 | 1060 | struct perf_cpu_context { | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1061 | 	struct perf_event_context	ctx; | 
 | 1062 | 	struct perf_event_context	*task_ctx; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1063 | 	int				active_oncpu; | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1064 | 	int				exclusive; | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1065 | 	struct list_head		rotation_list; | 
 | 1066 | 	int				jiffies_interval; | 
| Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 1067 | 	struct pmu			*active_pmu; | 
| Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1068 | 	struct perf_cgroup		*cgrp; | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1069 | }; | 
 | 1070 |  | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1071 | struct perf_output_handle { | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1072 | 	struct perf_event		*event; | 
| Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 1073 | 	struct ring_buffer		*rb; | 
| Peter Zijlstra | 6d1acfd | 2010-05-18 11:12:48 +0200 | [diff] [blame] | 1074 | 	unsigned long			wakeup; | 
| Peter Zijlstra | 5d967a8 | 2010-05-20 16:46:39 +0200 | [diff] [blame] | 1075 | 	unsigned long			size; | 
 | 1076 | 	void				*addr; | 
 | 1077 | 	int				page; | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1078 | }; | 
 | 1079 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1080 | #ifdef CONFIG_PERF_EVENTS | 
| Robert Richter | 829b42d | 2009-04-29 12:46:59 +0200 | [diff] [blame] | 1081 |  | 
| Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 1082 | extern int perf_pmu_register(struct pmu *pmu, char *name, int type); | 
| Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 1083 | extern void perf_pmu_unregister(struct pmu *pmu); | 
| Ingo Molnar | 621a01e | 2008-12-11 12:46:46 +0100 | [diff] [blame] | 1084 |  | 
| Matt Fleming | 3bf101b | 2010-09-27 20:22:24 +0100 | [diff] [blame] | 1085 | extern int perf_num_counters(void); | 
| Matt Fleming | 84c7991 | 2010-10-03 21:41:13 +0100 | [diff] [blame] | 1086 | extern const char *perf_pmu_name(void); | 
| Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1087 | extern void __perf_event_task_sched_in(struct task_struct *prev, | 
 | 1088 | 				       struct task_struct *task); | 
 | 1089 | extern void __perf_event_task_sched_out(struct task_struct *prev, | 
 | 1090 | 					struct task_struct *next); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1091 | extern int perf_event_init_task(struct task_struct *child); | 
 | 1092 | extern void perf_event_exit_task(struct task_struct *child); | 
 | 1093 | extern void perf_event_free_task(struct task_struct *task); | 
| Peter Zijlstra | 4e231c7 | 2010-09-09 21:01:59 +0200 | [diff] [blame] | 1094 | extern void perf_event_delayed_put(struct task_struct *task); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1095 | extern void perf_event_print_debug(void); | 
| Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1096 | extern void perf_pmu_disable(struct pmu *pmu); | 
 | 1097 | extern void perf_pmu_enable(struct pmu *pmu); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1098 | extern int perf_event_task_disable(void); | 
 | 1099 | extern int perf_event_task_enable(void); | 
| Avi Kivity | 26ca5c1 | 2011-06-29 18:42:37 +0300 | [diff] [blame] | 1100 | extern int perf_event_refresh(struct perf_event *event, int refresh); | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1101 | extern void perf_event_update_userpage(struct perf_event *event); | 
| Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 1102 | extern int perf_event_release_kernel(struct perf_event *event); | 
 | 1103 | extern struct perf_event * | 
 | 1104 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | 
 | 1105 | 				int cpu, | 
| Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 1106 | 				struct task_struct *task, | 
| Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 1107 | 				perf_overflow_handler_t callback, | 
 | 1108 | 				void *context); | 
| Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 1109 | extern u64 perf_event_read_value(struct perf_event *event, | 
 | 1110 | 				 u64 *enabled, u64 *running); | 
| Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 1111 |  | 
| Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 1112 |  | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 1113 | struct perf_sample_data { | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1114 | 	u64				type; | 
 | 1115 |  | 
 | 1116 | 	u64				ip; | 
 | 1117 | 	struct { | 
 | 1118 | 		u32	pid; | 
 | 1119 | 		u32	tid; | 
 | 1120 | 	}				tid_entry; | 
 | 1121 | 	u64				time; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1122 | 	u64				addr; | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1123 | 	u64				id; | 
 | 1124 | 	u64				stream_id; | 
 | 1125 | 	struct { | 
 | 1126 | 		u32	cpu; | 
 | 1127 | 		u32	reserved; | 
 | 1128 | 	}				cpu_entry; | 
| Ingo Molnar | a308444 | 2009-06-11 14:44:26 +0200 | [diff] [blame] | 1129 | 	u64				period; | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1130 | 	struct perf_callchain_entry	*callchain; | 
| Frederic Weisbecker | 3a43ce6 | 2009-08-08 04:26:37 +0200 | [diff] [blame] | 1131 | 	struct perf_raw_record		*raw; | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 1132 | 	struct perf_branch_stack	*br_stack; | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 1133 | }; | 
 | 1134 |  | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1135 | static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) | 
| Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 1136 | { | 
 | 1137 | 	data->addr = addr; | 
 | 1138 | 	data->raw  = NULL; | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 1139 | 	data->br_stack = NULL; | 
| Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 1140 | } | 
 | 1141 |  | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1142 | extern void perf_output_sample(struct perf_output_handle *handle, | 
 | 1143 | 			       struct perf_event_header *header, | 
 | 1144 | 			       struct perf_sample_data *data, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1145 | 			       struct perf_event *event); | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1146 | extern void perf_prepare_sample(struct perf_event_header *header, | 
 | 1147 | 				struct perf_sample_data *data, | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1148 | 				struct perf_event *event, | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1149 | 				struct pt_regs *regs); | 
 | 1150 |  | 
| Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1151 | extern int perf_event_overflow(struct perf_event *event, | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1152 | 				 struct perf_sample_data *data, | 
 | 1153 | 				 struct pt_regs *regs); | 
| Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 1154 |  | 
| Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 1155 | static inline bool is_sampling_event(struct perf_event *event) | 
 | 1156 | { | 
 | 1157 | 	return event->attr.sample_period != 0; | 
 | 1158 | } | 
 | 1159 |  | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1160 | /* | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1161 |  * Return 1 for a software event, 0 for a hardware event | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1162 |  */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1163 | static inline int is_software_event(struct perf_event *event) | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1164 | { | 
| Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 1165 | 	return event->pmu->task_ctx_nr == perf_sw_context; | 
| Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1166 | } | 
 | 1167 |  | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1168 | extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 1169 |  | 
| Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1170 | extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); | 
| Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 1171 |  | 
| Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1172 | #ifndef perf_arch_fetch_caller_regs | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1173 | static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } | 
| Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1174 | #endif | 
| Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 1175 |  | 
 | 1176 | /* | 
 | 1177 |  * Take a snapshot of the regs. Skip ip and frame pointer to | 
 | 1178 |  * the nth caller. We only need a few of the regs: | 
 | 1179 |  * - ip for PERF_SAMPLE_IP | 
 | 1180 |  * - cs for user_mode() tests | 
 | 1181 |  * - bp for callchains | 
 | 1182 |  * - eflags, for future purposes, just in case | 
 | 1183 |  */ | 
| Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1184 | static inline void perf_fetch_caller_regs(struct pt_regs *regs) | 
| Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 1185 | { | 
| Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 1186 | 	memset(regs, 0, sizeof(*regs)); | 
 | 1187 |  | 
| Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1188 | 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); | 
| Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 1189 | } | 
 | 1190 |  | 
| Peter Zijlstra | 7e54a5a | 2010-10-14 22:32:45 +0200 | [diff] [blame] | 1191 | static __always_inline void | 
| Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1192 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) | 
| Frederic Weisbecker | e49a5bd | 2010-03-22 19:40:03 +0100 | [diff] [blame] | 1193 | { | 
| Peter Zijlstra | 7e54a5a | 2010-10-14 22:32:45 +0200 | [diff] [blame] | 1194 | 	struct pt_regs hot_regs; | 
| Frederic Weisbecker | e49a5bd | 2010-03-22 19:40:03 +0100 | [diff] [blame] | 1195 |  | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1196 | 	if (static_key_false(&perf_swevent_enabled[event_id])) { | 
| Jason Baron | d430d3d | 2011-03-16 17:29:47 -0400 | [diff] [blame] | 1197 | 		if (!regs) { | 
 | 1198 | 			perf_fetch_caller_regs(&hot_regs); | 
 | 1199 | 			regs = &hot_regs; | 
 | 1200 | 		} | 
| Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1201 | 		__perf_sw_event(event_id, nr, regs, addr); | 
| Frederic Weisbecker | e49a5bd | 2010-03-22 19:40:03 +0100 | [diff] [blame] | 1202 | 	} | 
 | 1203 | } | 
 | 1204 |  | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1205 | extern struct static_key_deferred perf_sched_events; | 
| Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1206 |  | 
| Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1207 | static inline void perf_event_task_sched_in(struct task_struct *prev, | 
 | 1208 | 					    struct task_struct *task) | 
| Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1209 | { | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1210 | 	if (static_key_false(&perf_sched_events.key)) | 
| Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1211 | 		__perf_event_task_sched_in(prev, task); | 
| Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1212 | } | 
 | 1213 |  | 
| Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1214 | static inline void perf_event_task_sched_out(struct task_struct *prev, | 
 | 1215 | 					     struct task_struct *next) | 
| Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1216 | { | 
| Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1217 | 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); | 
| Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1218 |  | 
| Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1219 | 	if (static_key_false(&perf_sched_events.key)) | 
| Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1220 | 		__perf_event_task_sched_out(prev, next); | 
| Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1221 | } | 
 | 1222 |  | 
| Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 1223 | extern void perf_event_mmap(struct vm_area_struct *vma); | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1224 | extern struct perf_guest_info_callbacks *perf_guest_cbs; | 
| Zhang, Yanmin | dcf46b9 | 2010-04-20 10:13:58 +0800 | [diff] [blame] | 1225 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 
 | 1226 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1227 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1228 | extern void perf_event_comm(struct task_struct *tsk); | 
 | 1229 | extern void perf_event_fork(struct task_struct *tsk); | 
| Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 1230 |  | 
| Frederic Weisbecker | 56962b4 | 2010-06-30 23:03:51 +0200 | [diff] [blame] | 1231 | /* Callchains */ | 
 | 1232 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); | 
 | 1233 |  | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1234 | extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); | 
 | 1235 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); | 
| Frederic Weisbecker | 56962b4 | 2010-06-30 23:03:51 +0200 | [diff] [blame] | 1236 |  | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1237 | static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) | 
| Frederic Weisbecker | 70791ce | 2010-06-29 19:34:05 +0200 | [diff] [blame] | 1238 | { | 
 | 1239 | 	if (entry->nr < PERF_MAX_STACK_DEPTH) | 
 | 1240 | 		entry->ip[entry->nr++] = ip; | 
 | 1241 | } | 
| Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 1242 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1243 | extern int sysctl_perf_event_paranoid; | 
 | 1244 | extern int sysctl_perf_event_mlock; | 
 | 1245 | extern int sysctl_perf_event_sample_rate; | 
| Peter Zijlstra | 1ccd154 | 2009-04-09 10:53:45 +0200 | [diff] [blame] | 1246 |  | 
| Peter Zijlstra | 163ec43 | 2011-02-16 11:22:34 +0100 | [diff] [blame] | 1247 | extern int perf_proc_update_handler(struct ctl_table *table, int write, | 
 | 1248 | 		void __user *buffer, size_t *lenp, | 
 | 1249 | 		loff_t *ppos); | 
 | 1250 |  | 
| Peter Zijlstra | 320ebf0 | 2010-03-02 12:35:37 +0100 | [diff] [blame] | 1251 | static inline bool perf_paranoid_tracepoint_raw(void) | 
 | 1252 | { | 
 | 1253 | 	return sysctl_perf_event_paranoid > -1; | 
 | 1254 | } | 
 | 1255 |  | 
 | 1256 | static inline bool perf_paranoid_cpu(void) | 
 | 1257 | { | 
 | 1258 | 	return sysctl_perf_event_paranoid > 0; | 
 | 1259 | } | 
 | 1260 |  | 
 | 1261 | static inline bool perf_paranoid_kernel(void) | 
 | 1262 | { | 
 | 1263 | 	return sysctl_perf_event_paranoid > 1; | 
 | 1264 | } | 
 | 1265 |  | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1266 | extern void perf_event_init(void); | 
| Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 1267 | extern void perf_tp_event(u64 addr, u64 count, void *record, | 
 | 1268 | 			  int entry_size, struct pt_regs *regs, | 
| Peter Zijlstra | ecc55f8 | 2010-05-21 15:11:34 +0200 | [diff] [blame] | 1269 | 			  struct hlist_head *head, int rctx); | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 1270 | extern void perf_bp_event(struct perf_event *event, void *data); | 
| Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 1271 |  | 
| Paul Mackerras | 9d23a90 | 2009-05-14 21:48:08 +1000 | [diff] [blame] | 1272 | #ifndef perf_misc_flags | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1273 | # define perf_misc_flags(regs) \ | 
 | 1274 | 		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) | 
 | 1275 | # define perf_instruction_pointer(regs)	instruction_pointer(regs) | 
| Paul Mackerras | 9d23a90 | 2009-05-14 21:48:08 +1000 | [diff] [blame] | 1276 | #endif | 
 | 1277 |  | 
| Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 1278 | static inline bool has_branch_stack(struct perf_event *event) | 
 | 1279 | { | 
 | 1280 | 	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; | 
 | 1281 | } | 
 | 1282 |  | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1283 | extern int perf_output_begin(struct perf_output_handle *handle, | 
| Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 1284 | 			     struct perf_event *event, unsigned int size); | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1285 | extern void perf_output_end(struct perf_output_handle *handle); | 
 | 1286 | extern void perf_output_copy(struct perf_output_handle *handle, | 
 | 1287 | 			     const void *buf, unsigned int len); | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 1288 | extern int perf_swevent_get_recursion_context(void); | 
 | 1289 | extern void perf_swevent_put_recursion_context(int rctx); | 
| Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1290 | extern void perf_event_enable(struct perf_event *event); | 
 | 1291 | extern void perf_event_disable(struct perf_event *event); | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1292 | extern void perf_event_task_tick(void); | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1293 | #else | 
 | 1294 | static inline void | 
| Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1295 | perf_event_task_sched_in(struct task_struct *prev, | 
 | 1296 | 			 struct task_struct *task)			{ } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1297 | static inline void | 
| Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1298 | perf_event_task_sched_out(struct task_struct *prev, | 
 | 1299 | 			  struct task_struct *next)			{ } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1300 | static inline int perf_event_init_task(struct task_struct *child)	{ return 0; } | 
 | 1301 | static inline void perf_event_exit_task(struct task_struct *child)	{ } | 
 | 1302 | static inline void perf_event_free_task(struct task_struct *task)	{ } | 
| Peter Zijlstra | 4e231c7 | 2010-09-09 21:01:59 +0200 | [diff] [blame] | 1303 | static inline void perf_event_delayed_put(struct task_struct *task)	{ } | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1304 | static inline void perf_event_print_debug(void)				{ } | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1305 | static inline int perf_event_task_disable(void)				{ return -EINVAL; } | 
 | 1306 | static inline int perf_event_task_enable(void)				{ return -EINVAL; } | 
| Avi Kivity | 26ca5c1 | 2011-06-29 18:42:37 +0300 | [diff] [blame] | 1307 | static inline int perf_event_refresh(struct perf_event *event, int refresh) | 
 | 1308 | { | 
 | 1309 | 	return -EINVAL; | 
 | 1310 | } | 
| Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 1311 |  | 
| Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 1312 | static inline void | 
| Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1313 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ } | 
| Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 1314 | static inline void | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 1315 | perf_bp_event(struct perf_event *event, void *data)			{ } | 
| Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 1316 |  | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1317 | static inline int perf_register_guest_info_callbacks | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1318 | (struct perf_guest_info_callbacks *callbacks)				{ return 0; } | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1319 | static inline int perf_unregister_guest_info_callbacks | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1320 | (struct perf_guest_info_callbacks *callbacks)				{ return 0; } | 
| Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1321 |  | 
| Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1322 | static inline void perf_event_mmap(struct vm_area_struct *vma)		{ } | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1323 | static inline void perf_event_comm(struct task_struct *tsk)		{ } | 
 | 1324 | static inline void perf_event_fork(struct task_struct *tsk)		{ } | 
 | 1325 | static inline void perf_event_init(void)				{ } | 
| Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 1326 | static inline int  perf_swevent_get_recursion_context(void)		{ return -1; } | 
| Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 1327 | static inline void perf_swevent_put_recursion_context(int rctx)		{ } | 
| Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1328 | static inline void perf_event_enable(struct perf_event *event)		{ } | 
 | 1329 | static inline void perf_event_disable(struct perf_event *event)		{ } | 
| Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1330 | static inline void perf_event_task_tick(void)				{ } | 
| Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1331 | #endif | 
 | 1332 |  | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1333 | #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) | 
| Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1334 |  | 
| Peter Zijlstra | 3f6da39 | 2010-03-05 13:01:18 +0100 | [diff] [blame] | 1335 | /* | 
 | 1336 |  * This has to have a higher priority than migration_notifier in sched.c. | 
 | 1337 |  */ | 
| Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1338 | #define perf_cpu_notifier(fn)						\ | 
 | 1339 | do {									\ | 
 | 1340 | 	static struct notifier_block fn##_nb __cpuinitdata =		\ | 
 | 1341 | 		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\ | 
 | 1342 | 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\ | 
 | 1343 | 		(void *)(unsigned long)smp_processor_id());		\ | 
 | 1344 | 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\ | 
 | 1345 | 		(void *)(unsigned long)smp_processor_id());		\ | 
 | 1346 | 	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\ | 
 | 1347 | 		(void *)(unsigned long)smp_processor_id());		\ | 
 | 1348 | 	register_cpu_notifier(&fn##_nb);				\ | 
| Peter Zijlstra | 3f6da39 | 2010-03-05 13:01:18 +0100 | [diff] [blame] | 1349 | } while (0) | 
 | 1350 |  | 
| Jiri Olsa | 641cc93 | 2012-03-15 20:09:14 +0100 | [diff] [blame] | 1351 |  | 
 | 1352 | #define PMU_FORMAT_ATTR(_name, _format)					\ | 
 | 1353 | static ssize_t								\ | 
 | 1354 | _name##_show(struct device *dev,					\ | 
 | 1355 | 			       struct device_attribute *attr,		\ | 
 | 1356 | 			       char *page)				\ | 
 | 1357 | {									\ | 
 | 1358 | 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\ | 
 | 1359 | 	return sprintf(page, _format "\n");				\ | 
 | 1360 | }									\ | 
 | 1361 | 									\ | 
 | 1362 | static struct device_attribute format_attr_##_name = __ATTR_RO(_name) | 
 | 1363 |  | 
| Paul Mackerras | f3dfd26 | 2009-02-26 22:43:46 +1100 | [diff] [blame] | 1364 | #endif /* __KERNEL__ */ | 
| Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1365 | #endif /* _LINUX_PERF_EVENT_H */ |