/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred/predicted is optional; when it is not
 * supported, mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1, /* target predicted  */
		reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 *
 * Branches (to, from) are stored from most recent to least recent,
 * i.e., entries[0] contains the most recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
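
/*
 * Illustrative sketch (not part of this header): a consumer walking a
 * sampled branch stack in the order described above, newest first.
 * dump_branches() is a hypothetical helper name.
 *
 *	static void dump_branches(const struct perf_branch_stack *bs)
 *	{
 *		__u64 i;
 *
 *		for (i = 0; i < bs->nr; i++)
 *			pr_debug("%llu: %llx -> %llx %s\n", i,
 *				 bs->entries[i].from, bs->entries[i].to,
 *				 bs->entries[i].mispred ? "(mispredicted)" : "");
 *	}
 */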

struct perf_regs_user {
	__u64		abi;
	struct pt_regs	*regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct device			*dev;
	const struct attribute_group	**attr_groups;
	char				*name;
	int				type;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU; can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU; can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);
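
	/*
	 * Illustrative driver-side sketch (the my_hw_*() accessors are
	 * hypothetical): ->stop() with PERF_EF_UPDATE typically drains the
	 * hardware count and marks the event STOPPED and UPTODATE:
	 *
	 *	static void my_stop(struct perf_event *event, int flags)
	 *	{
	 *		my_hw_disable(event->hw.idx);
	 *		event->hw.state |= PERF_HES_STOPPED;
	 *		if ((flags & PERF_EF_UPDATE) &&
	 *		    !(event->hw.state & PERF_HES_UPTODATE)) {
	 *			my_hw_read(event);
	 *			event->hw.state |= PERF_HES_UPTODATE;
	 *		}
	 *	}
	 */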

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add the
	 * group's events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to do
	 * schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */
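
	/*
	 * Illustrative sketch of a caller (loosely mirrors what the core
	 * group-scheduling code does; error handling simplified):
	 *
	 *	pmu->start_txn(pmu);
	 *	list_for_each_entry(event, &leader->sibling_list, group_entry)
	 *		if (pmu->add(event, PERF_EF_START))
	 *			goto group_error;
	 *	if (!pmu->commit_txn(pmu))
	 *		return 0;
	 * group_error:
	 *	pmu->cancel_txn(pmu);
	 *	return -EAGAIN;
	 */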

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack)	(void);
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup evts */
	int				nr_branch_stack; /* branch_stack evt */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
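
/*
 * Minimal registration sketch (hypothetical driver; the my_*() callbacks
 * and error handling are elided). Passing -1 as the type requests a
 * dynamically allocated PMU type id:
 *
 *	static struct pmu my_pmu = {
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */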

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
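
/*
 * In-kernel usage sketch (illustrative; my_overflow_handler is a
 * hypothetical perf_overflow_handler_t). A NULL @task makes this a
 * per-CPU event on @cpu:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(struct perf_event_attr),
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */
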
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	struct perf_regs_user		regs_user;
	u64				stack_user_size;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
	data->regs_user.regs = NULL;
	data->stack_user_size = 0;
}
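
/*
 * Typical overflow-path use (sketch; the stop-on-overflow policy is
 * driver-specific, see pmu::stop() above):
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		event->pmu->stop(event, 0);
 */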

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
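
/*
 * Example (the page-fault path does essentially this):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * Passing a NULL @regs snapshots the caller's register state via
 * perf_fetch_caller_regs().
 */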

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
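
/*
 * Arch backends call this from their perf_callchain_kernel()/_user()
 * implementations. x86-flavoured sketch, with a hypothetical
 * for_each_frame() standing in for the arch unwinder:
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long addr;
 *
 *		perf_callchain_store(entry, regs->ip);
 *		for_each_frame(addr, regs)
 *			perf_callchain_store(entry, addr);
 *	}
 */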

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}
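
/*
 * Worked example: with sysctl_perf_event_paranoid at its usual default
 * of 1,
 *
 *	perf_paranoid_tracepoint_raw()	-> true  (1 > -1)
 *	perf_paranoid_cpu()		-> true  (1 >  0)
 *	perf_paranoid_kernel()		-> false (1 >  1 fails)
 */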

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
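
/*
 * Architectures with more precise information (x86, for instance, which
 * also needs to tag guest samples) supply their own definitions in their
 * <asm/perf_event.h>; the two macros above are only the generic fallback.
 * A hypothetical arch override would look like:
 *
 *	#define perf_misc_flags(regs)		my_arch_misc_flags(regs)
 *	#define perf_instruction_pointer(regs)	my_arch_ip(regs)
 */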

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}
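
/*
 * Usage sketch (a common pattern in PMU drivers; the driver name is
 * hypothetical): a PMU without hardware branch-stack support rejects such
 * events at event_init time rather than silently dropping the data.
 *
 *	static int my_pmu_event_init(struct perf_event *event)
 *	{
 *		if (has_branch_stack(event))
 *			return -EOPNOTSUPP;
 *		return 0;
 *	}
 */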

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
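
/*
 * Usage sketch for the recursion-context pair (simplified; in the real
 * tracepoint path the context is obtained before the record is built and
 * released on the way out).  A generic caller looks like:
 *
 *	int rctx = perf_swevent_get_recursion_context();
 *	if (rctx < 0)
 *		return;		// already inside a perf handler at this level
 *	... emit the event ...
 *	perf_swevent_put_recursion_context(rctx);
 */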

extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

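/*
 * Output sketch (the standard produce pattern used by the core; 'rec' is a
 * hypothetical fixed-size record): reserve space, copy typed data with
 * perf_output_put(), then commit.  A non-zero return from
 * perf_output_begin() means the ring buffer had no room and nothing may be
 * written.
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, sizeof(rec)))
 *		return;
 *	perf_output_put(&handle, rec);
 *	perf_output_end(&handle);
 */
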
/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	register_cpu_notifier(&fn##_nb);				\
} while (0)
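
/*
 * Usage sketch (modelled on how arch PMUs hook CPU hotplug; the notifier
 * body and helper are hypothetical): the macro replays CPU_UP_PREPARE,
 * CPU_STARTING and CPU_ONLINE for the booting CPU, then registers the
 * notifier so the remaining CPUs get the callbacks the normal way.
 *
 *	static int __cpuinit
 *	my_pmu_notifier(struct notifier_block *nb, unsigned long action,
 *			void *hcpu)
 *	{
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_STARTING:
 *			my_pmu_setup_cpu((long)hcpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	...
 *	perf_cpu_notifier(my_pmu_notifier);
 */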

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

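/*
 * Usage sketch (the pattern PMU drivers use to describe their config
 * bitfields in sysfs; the group and array names are illustrative): each
 * PMU_FORMAT_ATTR() invocation defines a format_attr_<name> attribute,
 * which is then collected into the PMU's "format" attribute group.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_pmu_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 *	static struct attribute_group my_pmu_format_group = {
 *		.name	= "format",
 *		.attrs	= my_pmu_format_attrs,
 *	};
 */
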
#endif /* _LINUX_PERF_EVENT_H */