/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on.  A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

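/*
 * Illustrative sketch (not part of this file): the swap described above
 * boils down to splicing the reader page in place of the current head page
 * with a single atomic pointer exchange. The helper name below is a
 * placeholder and the flag handling is omitted; the real logic lives in
 * rb_get_reader_page() and rb_head_page_replace() further down.
 */
#if 0
static bool example_swap_reader_page(struct buffer_page *reader,
				     struct buffer_page *head)
{
	/* Point the reader page at the pages around head ... */
	reader->list.next = head->list.next;
	reader->list.prev = head->list.prev;

	/*
	 * ... then atomically make the page before head point to the
	 * reader page instead of head. If a writer moved the head page
	 * first, the cmpxchg fails and the swap must be retried.
	 */
	return cmpxchg(&head->list.prev->next,
		       &head->list, &reader->list) == &head->list;
}
#endif
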
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 *  This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

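/*
 * Illustrative sketch (not part of this file): the three layers described
 * above gate every write. A record is only stored when the global flag,
 * the buffer, and the per-cpu buffer all allow recording. The helper name
 * is a placeholder; the real checks are spread across the reserve path.
 */
#if 0
static bool example_recording_allowed(struct ring_buffer *buffer,
				      struct ring_buffer_per_cpu *cpu_buffer)
{
	/* 1) global flag: must be exactly ON (the DISABLED bit is permanent) */
	if (ring_buffer_flags != RB_BUFFERS_ON)
		return false;
	/* 2) the ring buffer itself must be enabled */
	if (atomic_read(&buffer->record_disabled))
		return false;
	/* 3) the per cpu buffer must be enabled */
	if (atomic_read(&cpu_buffer->record_disabled))
		return false;
	return true;
}
#endif
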
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
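
/*
 * Illustrative sketch (not part of this file): how the 5-bit type_len field
 * encodes a data event's size. Small payloads (up to RB_MAX_SMALL_DATA
 * bytes) store their length directly in type_len, in RB_ALIGNMENT units;
 * larger payloads set type_len to 0 and put the byte length in array[0].
 * The values below are just a worked example.
 */
#if 0
static void example_type_len_encoding(void)
{
	char buf[32] __aligned(4) = { 0 };
	struct ring_buffer_event *ev = (struct ring_buffer_event *)buf;

	/* type_len = 3 -> 3 * RB_ALIGNMENT = 12 bytes of data after the header */
	ev->type_len = 3;
	WARN_ON(rb_event_data_length(ev) != 12 + RB_EVNT_HDR_SIZE);

	/* type_len = 0 -> the length lives in array[0] (here, 100 bytes) */
	ev->type_len = 0;
	ev->array[0] = 100;
	WARN_ON(rb_event_data_length(ev) != 100 + RB_EVNT_HDR_SIZE);
}
#endif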

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

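/*
 * Illustrative sketch (not part of this file): how the write field packs a
 * 20-bit write index together with a 12-bit "updaters" count, as described
 * in the comment above. The helper name is a placeholder.
 */
#if 0
static void example_write_counter_packing(struct buffer_page *bpage)
{
	unsigned long raw, index, updaters;

	/* announce an update: bumps the updater count in the upper bits */
	raw = local_add_return(RB_WRITE_INTCNT, &bpage->write);

	index    = raw & RB_WRITE_MASK;	/* low 20 bits: write position */
	updaters = raw >> 20;		/* upper 12 bits: nested updaters */
	(void)index;
	(void)updaters;
}
#endif
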
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
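
/*
 * Illustrative sketch (not part of this file): a data event only carries a
 * 27-bit timestamp delta. When the delta since the last event does not fit
 * (test_time_stamp() returns true), the write path emits a separate
 * TIME_EXTEND event that carries the overflowing upper bits. The helper
 * name is a placeholder showing how such a delta could be split.
 */
#if 0
static void example_split_time_delta(u64 delta, struct ring_buffer_event *event)
{
	if (!test_time_stamp(delta)) {
		/* fits in 27 bits: store it directly in the event */
		event->time_delta = delta;
		return;
	}

	/* does not fit: emit a time extend carrying the upper bits */
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
	event->time_delta = delta & TS_MASK;	/* low 27 bits */
	event->array[0] = delta >> TS_SHIFT;	/* upper bits   */
}
#endif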

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				commit_overrun;
	local_t				overrun;
	local_t				entries;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see, it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself. But only
 *  temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}
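
/*
 * Illustrative sketch (not part of this file): because buffer pages are
 * cache-line aligned, the two least significant bits of a list pointer are
 * always zero and can carry the HEAD/UPDATE flags described above. The
 * helper name is a placeholder; rb_list_head() above strips the flags, and
 * rb_head_page_set() below changes them atomically.
 */
#if 0
static void example_pointer_tagging(struct list_head *next)
{
	/* assumes next is an untagged, cache-line-aligned pointer */
	unsigned long val = (unsigned long)next;

	/* tag the pointer as "points to the head page" */
	val = (val & ~RB_FLAG_MASK) | RB_PAGE_HEAD;

	/* recover the real pointer and the flag independently */
	WARN_ON(rb_list_head((struct list_head *)val) != next);
	WARN_ON((val & RB_FLAG_MASK) != RB_PAGE_HEAD);
}
#endif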

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss it in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct buffer_page *bpage, *tmp;
	LIST_HEAD(pages);
	unsigned i;

	WARN_ON(!nr_pages);

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_NORETRY flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is
		 * not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL | __GFP_NORETRY,
				    cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, &pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1078 | 	struct buffer_page *bpage, *tmp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1079 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1080 | 	free_buffer_page(cpu_buffer->reader_page); | 
 | 1081 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1082 | 	rb_head_page_deactivate(cpu_buffer); | 
 | 1083 |  | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1084 | 	if (head) { | 
 | 1085 | 		list_for_each_entry_safe(bpage, tmp, head, list) { | 
 | 1086 | 			list_del_init(&bpage->list); | 
 | 1087 | 			free_buffer_page(bpage); | 
 | 1088 | 		} | 
 | 1089 | 		bpage = list_entry(head, struct buffer_page, list); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1090 | 		free_buffer_page(bpage); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1091 | 	} | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1092 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1093 | 	kfree(cpu_buffer); | 
 | 1094 | } | 
 | 1095 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 1096 | #ifdef CONFIG_HOTPLUG_CPU | 
| Frederic Weisbecker | 09c9e84 | 2009-03-21 04:33:36 +0100 | [diff] [blame] | 1097 | static int rb_cpu_notify(struct notifier_block *self, | 
 | 1098 | 			 unsigned long action, void *hcpu); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1099 | #endif | 
 | 1100 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1101 | /** | 
 | 1102 |  * ring_buffer_alloc - allocate a new ring_buffer | 
| Robert Richter | 68814b5 | 2008-11-24 12:24:12 +0100 | [diff] [blame] | 1103 |  * @size: the size in bytes per cpu that is needed. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1104 |  * @flags: attributes to set for the ring buffer. | 
 | 1105 |  * | 
 | 1106 |  * Currently the only flag that is available is the RB_FL_OVERWRITE | 
 | 1107 |  * flag. This flag means that the buffer will overwrite old data | 
 | 1108 |  * when the buffer wraps. If this flag is not set, the buffer will | 
 | 1109 |  * drop data when the tail hits the head. | 
 | 1110 |  */ | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 1111 | struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | 
 | 1112 | 					struct lock_class_key *key) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1113 | { | 
 | 1114 | 	struct ring_buffer *buffer; | 
 | 1115 | 	int bsize; | 
 | 1116 | 	int cpu; | 
 | 1117 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1118 | 	/* keep it in its own cache line */ | 
 | 1119 | 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), | 
 | 1120 | 			 GFP_KERNEL); | 
 | 1121 | 	if (!buffer) | 
 | 1122 | 		return NULL; | 
 | 1123 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1124 | 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) | 
 | 1125 | 		goto fail_free_buffer; | 
 | 1126 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1127 | 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 
 | 1128 | 	buffer->flags = flags; | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 1129 | 	buffer->clock = trace_clock_local; | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 1130 | 	buffer->reader_lock_key = key; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1131 |  | 
 | 1132 | 	/* need at least two pages */ | 
| Steven Rostedt | 5f78abe | 2009-06-17 14:11:10 -0400 | [diff] [blame] | 1133 | 	if (buffer->pages < 2) | 
 | 1134 | 		buffer->pages = 2; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1135 |  | 
| Frederic Weisbecker | 3bf832c | 2009-03-19 14:47:33 +0100 | [diff] [blame] | 1136 | 	/* | 
 | 1137 | 	 * If CPU hotplug is not configured and the ring buffer is | 
 | 1138 | 	 * allocated in an early initcall, it will never be notified of | 
 | 1139 | 	 * secondary CPUs. In that case, allocate for all possible CPUs. | 
 | 1140 | 	 */ | 
 | 1141 | #ifdef CONFIG_HOTPLUG_CPU | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1142 | 	get_online_cpus(); | 
 | 1143 | 	cpumask_copy(buffer->cpumask, cpu_online_mask); | 
| Frederic Weisbecker | 3bf832c | 2009-03-19 14:47:33 +0100 | [diff] [blame] | 1144 | #else | 
 | 1145 | 	cpumask_copy(buffer->cpumask, cpu_possible_mask); | 
 | 1146 | #endif | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1147 | 	buffer->cpus = nr_cpu_ids; | 
 | 1148 |  | 
 | 1149 | 	bsize = sizeof(void *) * nr_cpu_ids; | 
 | 1150 | 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), | 
 | 1151 | 				  GFP_KERNEL); | 
 | 1152 | 	if (!buffer->buffers) | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1153 | 		goto fail_free_cpumask; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1154 |  | 
 | 1155 | 	for_each_buffer_cpu(buffer, cpu) { | 
 | 1156 | 		buffer->buffers[cpu] = | 
 | 1157 | 			rb_allocate_cpu_buffer(buffer, cpu); | 
 | 1158 | 		if (!buffer->buffers[cpu]) | 
 | 1159 | 			goto fail_free_buffers; | 
 | 1160 | 	} | 
 | 1161 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 1162 | #ifdef CONFIG_HOTPLUG_CPU | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1163 | 	buffer->cpu_notify.notifier_call = rb_cpu_notify; | 
 | 1164 | 	buffer->cpu_notify.priority = 0; | 
 | 1165 | 	register_cpu_notifier(&buffer->cpu_notify); | 
 | 1166 | #endif | 
 | 1167 |  | 
 | 1168 | 	put_online_cpus(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1169 | 	mutex_init(&buffer->mutex); | 
 | 1170 |  | 
 | 1171 | 	return buffer; | 
 | 1172 |  | 
 | 1173 |  fail_free_buffers: | 
 | 1174 | 	for_each_buffer_cpu(buffer, cpu) { | 
 | 1175 | 		if (buffer->buffers[cpu]) | 
 | 1176 | 			rb_free_cpu_buffer(buffer->buffers[cpu]); | 
 | 1177 | 	} | 
 | 1178 | 	kfree(buffer->buffers); | 
 | 1179 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1180 |  fail_free_cpumask: | 
 | 1181 | 	free_cpumask_var(buffer->cpumask); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1182 | 	put_online_cpus(); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1183 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1184 |  fail_free_buffer: | 
 | 1185 | 	kfree(buffer); | 
 | 1186 | 	return NULL; | 
 | 1187 | } | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 1188 | EXPORT_SYMBOL_GPL(__ring_buffer_alloc); | 
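/*
 * Minimal usage sketch (hypothetical caller, shown for illustration only):
 * callers normally go through the ring_buffer_alloc() wrapper macro in
 * linux/ring_buffer.h, which supplies the lock_class_key for lockdep:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *
 * The size is in bytes, is rounded up to whole buffer pages, and is
 * allocated per CPU rather than shared across CPUs.
 */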
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1189 |  | 
 | 1190 | /** | 
 | 1191 |  * ring_buffer_free - free a ring buffer. | 
 | 1192 |  * @buffer: the buffer to free. | 
 | 1193 |  */ | 
 | 1194 | void | 
 | 1195 | ring_buffer_free(struct ring_buffer *buffer) | 
 | 1196 | { | 
 | 1197 | 	int cpu; | 
 | 1198 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1199 | 	get_online_cpus(); | 
 | 1200 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 1201 | #ifdef CONFIG_HOTPLUG_CPU | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1202 | 	unregister_cpu_notifier(&buffer->cpu_notify); | 
 | 1203 | #endif | 
 | 1204 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1205 | 	for_each_buffer_cpu(buffer, cpu) | 
 | 1206 | 		rb_free_cpu_buffer(buffer->buffers[cpu]); | 
 | 1207 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1208 | 	put_online_cpus(); | 
 | 1209 |  | 
| Eric Dumazet | bd3f022 | 2009-08-07 12:49:29 +0200 | [diff] [blame] | 1210 | 	kfree(buffer->buffers); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1211 | 	free_cpumask_var(buffer->cpumask); | 
 | 1212 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1213 | 	kfree(buffer); | 
 | 1214 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1215 | EXPORT_SYMBOL_GPL(ring_buffer_free); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1216 |  | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 1217 | void ring_buffer_set_clock(struct ring_buffer *buffer, | 
 | 1218 | 			   u64 (*clock)(void)) | 
 | 1219 | { | 
 | 1220 | 	buffer->clock = clock; | 
 | 1221 | } | 
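/*
 * Example (assumed caller, for illustration only): the tracing core can
 * switch a live buffer to a different trace clock, e.g.
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 *
 * Any u64 (*)(void) counter will do; the buffer only ever stores deltas
 * between successive readings of this clock.
 */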
 | 1222 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1223 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | 
 | 1224 |  | 
 | 1225 | static void | 
 | 1226 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | 
 | 1227 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1228 | 	struct buffer_page *bpage; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1229 | 	struct list_head *p; | 
 | 1230 | 	unsigned i; | 
 | 1231 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1232 | 	raw_spin_lock_irq(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1233 | 	rb_head_page_deactivate(cpu_buffer); | 
 | 1234 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1235 | 	for (i = 0; i < nr_pages; i++) { | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1236 | 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) | 
| Julia Lawall | 292f60c | 2010-03-29 17:37:02 +0200 | [diff] [blame] | 1237 | 			goto out; | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1238 | 		p = cpu_buffer->pages->next; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1239 | 		bpage = list_entry(p, struct buffer_page, list); | 
 | 1240 | 		list_del_init(&bpage->list); | 
 | 1241 | 		free_buffer_page(bpage); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1242 | 	} | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1243 | 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) | 
| Julia Lawall | 292f60c | 2010-03-29 17:37:02 +0200 | [diff] [blame] | 1244 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1245 |  | 
 | 1246 | 	rb_reset_cpu(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1247 | 	rb_check_pages(cpu_buffer); | 
 | 1248 |  | 
| Julia Lawall | 292f60c | 2010-03-29 17:37:02 +0200 | [diff] [blame] | 1249 | out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1250 | 	raw_spin_unlock_irq(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1251 | } | 
 | 1252 |  | 
 | 1253 | static void | 
 | 1254 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 1255 | 		struct list_head *pages, unsigned nr_pages) | 
 | 1256 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1257 | 	struct buffer_page *bpage; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1258 | 	struct list_head *p; | 
 | 1259 | 	unsigned i; | 
 | 1260 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1261 | 	raw_spin_lock_irq(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1262 | 	rb_head_page_deactivate(cpu_buffer); | 
 | 1263 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1264 | 	for (i = 0; i < nr_pages; i++) { | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1265 | 		if (RB_WARN_ON(cpu_buffer, list_empty(pages))) | 
| Julia Lawall | 292f60c | 2010-03-29 17:37:02 +0200 | [diff] [blame] | 1266 | 			goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1267 | 		p = pages->next; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1268 | 		bpage = list_entry(p, struct buffer_page, list); | 
 | 1269 | 		list_del_init(&bpage->list); | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1270 | 		list_add_tail(&bpage->list, cpu_buffer->pages); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1271 | 	} | 
 | 1272 | 	rb_reset_cpu(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1273 | 	rb_check_pages(cpu_buffer); | 
 | 1274 |  | 
| Julia Lawall | 292f60c | 2010-03-29 17:37:02 +0200 | [diff] [blame] | 1275 | out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1276 | 	raw_spin_unlock_irq(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1277 | } | 
 | 1278 |  | 
 | 1279 | /** | 
 | 1280 |  * ring_buffer_resize - resize the ring buffer | 
 | 1281 |  * @buffer: the buffer to resize. | 
 | 1282 |  * @size: the new size. | 
 | 1283 |  * | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1284 |  * Minimum size is 2 * BUF_PAGE_SIZE. | 
 | 1285 |  * | 
 | 1286 |  * Returns the new size on success, -ENOMEM or -1 on failure. | 
 | 1287 |  */ | 
 | 1288 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | 
 | 1289 | { | 
 | 1290 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 1291 | 	unsigned nr_pages, rm_pages, new_pages; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1292 | 	struct buffer_page *bpage, *tmp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1293 | 	unsigned long buffer_size; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1294 | 	LIST_HEAD(pages); | 
 | 1295 | 	int i, cpu; | 
 | 1296 |  | 
| Ingo Molnar | ee51a1d | 2008-11-13 14:58:31 +0100 | [diff] [blame] | 1297 | 	/* | 
 | 1298 | 	 * Always succeed at resizing a non-existent buffer: | 
 | 1299 | 	 */ | 
 | 1300 | 	if (!buffer) | 
 | 1301 | 		return size; | 
 | 1302 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1303 | 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 
 | 1304 | 	size *= BUF_PAGE_SIZE; | 
 | 1305 | 	buffer_size = buffer->pages * BUF_PAGE_SIZE; | 
 | 1306 |  | 
 | 1307 | 	/* we need a minimum of two pages */ | 
 | 1308 | 	if (size < BUF_PAGE_SIZE * 2) | 
 | 1309 | 		size = BUF_PAGE_SIZE * 2; | 
 | 1310 |  | 
 | 1311 | 	if (size == buffer_size) | 
 | 1312 | 		return size; | 
 | 1313 |  | 
| Steven Rostedt | 1842101 | 2009-12-10 22:54:27 -0500 | [diff] [blame] | 1314 | 	atomic_inc(&buffer->record_disabled); | 
 | 1315 |  | 
 | 1316 | 	/* Make sure all writers are done with this buffer. */ | 
 | 1317 | 	synchronize_sched(); | 
 | 1318 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1319 | 	mutex_lock(&buffer->mutex); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1320 | 	get_online_cpus(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1321 |  | 
 | 1322 | 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 
 | 1323 |  | 
 | 1324 | 	if (size < buffer_size) { | 
 | 1325 |  | 
 | 1326 | 		/* easy case, just free pages */ | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1327 | 		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) | 
 | 1328 | 			goto out_fail; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1329 |  | 
 | 1330 | 		rm_pages = buffer->pages - nr_pages; | 
 | 1331 |  | 
 | 1332 | 		for_each_buffer_cpu(buffer, cpu) { | 
 | 1333 | 			cpu_buffer = buffer->buffers[cpu]; | 
 | 1334 | 			rb_remove_pages(cpu_buffer, rm_pages); | 
 | 1335 | 		} | 
 | 1336 | 		goto out; | 
 | 1337 | 	} | 
 | 1338 |  | 
 | 1339 | 	/* | 
 | 1340 | 	 * This is a bit more difficult. We only want to add pages | 
 | 1341 | 	 * when we can allocate enough for all CPUs. We do this | 
 | 1342 | 	 * by allocating all the pages and storing them on a local | 
 | 1343 | 	 * linked list. If we succeed in our allocation, then we | 
 | 1344 | 	 * add these pages to the cpu_buffers. Otherwise we just free | 
 | 1345 | 	 * them all and return -ENOMEM. | 
 | 1346 | 	 */ | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1347 | 	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) | 
 | 1348 | 		goto out_fail; | 
| Steven Rostedt | f536aaf | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 1349 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1350 | 	new_pages = nr_pages - buffer->pages; | 
 | 1351 |  | 
 | 1352 | 	for_each_buffer_cpu(buffer, cpu) { | 
 | 1353 | 		for (i = 0; i < new_pages; i++) { | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1354 | 			struct page *page; | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 1355 | 			/* | 
 | 1356 | 			 * The __GFP_NORETRY flag makes the allocation fail | 
 | 1357 | 			 * gracefully without invoking the OOM killer, so the | 
 | 1358 | 			 * system is not destabilized. | 
 | 1359 | 			 */ | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1360 | 			bpage = kzalloc_node(ALIGN(sizeof(*bpage), | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 1361 | 						  cache_line_size()), | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 1362 | 					    GFP_KERNEL | __GFP_NORETRY, | 
 | 1363 | 					    cpu_to_node(cpu)); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1364 | 			if (!bpage) | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 1365 | 				goto free_pages; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1366 | 			list_add(&bpage->list, &pages); | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 1367 | 			page = alloc_pages_node(cpu_to_node(cpu), | 
 | 1368 | 						GFP_KERNEL | __GFP_NORETRY, 0); | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1369 | 			if (!page) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1370 | 				goto free_pages; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1371 | 			bpage->page = page_address(page); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1372 | 			rb_init_page(bpage->page); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1373 | 		} | 
 | 1374 | 	} | 
 | 1375 |  | 
 | 1376 | 	for_each_buffer_cpu(buffer, cpu) { | 
 | 1377 | 		cpu_buffer = buffer->buffers[cpu]; | 
 | 1378 | 		rb_insert_pages(cpu_buffer, &pages, new_pages); | 
 | 1379 | 	} | 
 | 1380 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1381 | 	if (RB_WARN_ON(buffer, !list_empty(&pages))) | 
 | 1382 | 		goto out_fail; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1383 |  | 
 | 1384 |  out: | 
 | 1385 | 	buffer->pages = nr_pages; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1386 | 	put_online_cpus(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1387 | 	mutex_unlock(&buffer->mutex); | 
 | 1388 |  | 
| Steven Rostedt | 1842101 | 2009-12-10 22:54:27 -0500 | [diff] [blame] | 1389 | 	atomic_dec(&buffer->record_disabled); | 
 | 1390 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1391 | 	return size; | 
 | 1392 |  | 
 | 1393 |  free_pages: | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1394 | 	list_for_each_entry_safe(bpage, tmp, &pages, list) { | 
 | 1395 | 		list_del_init(&bpage->list); | 
 | 1396 | 		free_buffer_page(bpage); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1397 | 	} | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1398 | 	put_online_cpus(); | 
| Vegard Nossum | 641d2f6 | 2008-11-18 19:22:13 +0100 | [diff] [blame] | 1399 | 	mutex_unlock(&buffer->mutex); | 
| Steven Rostedt | 1842101 | 2009-12-10 22:54:27 -0500 | [diff] [blame] | 1400 | 	atomic_dec(&buffer->record_disabled); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1401 | 	return -ENOMEM; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1402 |  | 
 | 1403 | 	/* | 
 | 1404 | 	 * Something went totally wrong, and we are too paranoid | 
 | 1405 | 	 * to even clean up the mess. | 
 | 1406 | 	 */ | 
 | 1407 |  out_fail: | 
 | 1408 | 	put_online_cpus(); | 
 | 1409 | 	mutex_unlock(&buffer->mutex); | 
| Steven Rostedt | 1842101 | 2009-12-10 22:54:27 -0500 | [diff] [blame] | 1410 | 	atomic_dec(&buffer->record_disabled); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1411 | 	return -1; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1412 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1413 | EXPORT_SYMBOL_GPL(ring_buffer_resize); | 
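/*
 * Usage sketch (hypothetical caller): the new size is again in bytes per
 * CPU, and the effective (page-rounded) size is returned:
 *
 *	ret = ring_buffer_resize(buffer, 512 * 1024);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed: %d\n", ret);
 *
 * Recording is disabled while the resize runs, so writers that race with it
 * simply have their events dropped.
 */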
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1414 |  | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 1415 | void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) | 
 | 1416 | { | 
 | 1417 | 	mutex_lock(&buffer->mutex); | 
 | 1418 | 	if (val) | 
 | 1419 | 		buffer->flags |= RB_FL_OVERWRITE; | 
 | 1420 | 	else | 
 | 1421 | 		buffer->flags &= ~RB_FL_OVERWRITE; | 
 | 1422 | 	mutex_unlock(&buffer->mutex); | 
 | 1423 | } | 
 | 1424 | EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); | 
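/*
 * Example (assumed caller): switching a live buffer from "overwrite oldest"
 * to "stop writing when full":
 *
 *	ring_buffer_change_overwrite(buffer, 0);
 *
 * With the flag cleared, a writer that reaches the head page gives up in
 * rb_move_tail() instead of pushing the head forward.
 */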
 | 1425 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 1426 | static inline void * | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1427 | __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 1428 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1429 | 	return bpage->data + index; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 1430 | } | 
 | 1431 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1432 | static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1433 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1434 | 	return bpage->page->data + index; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1435 | } | 
 | 1436 |  | 
 | 1437 | static inline struct ring_buffer_event * | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1438 | rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1439 | { | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1440 | 	return __rb_page_index(cpu_buffer->reader_page, | 
 | 1441 | 			       cpu_buffer->reader_page->read); | 
 | 1442 | } | 
 | 1443 |  | 
 | 1444 | static inline struct ring_buffer_event * | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1445 | rb_iter_head_event(struct ring_buffer_iter *iter) | 
 | 1446 | { | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1447 | 	return __rb_page_index(iter->head_page, iter->head); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1448 | } | 
 | 1449 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1450 | static inline unsigned long rb_page_write(struct buffer_page *bpage) | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1451 | { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1452 | 	return local_read(&bpage->write) & RB_WRITE_MASK; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1453 | } | 
 | 1454 |  | 
 | 1455 | static inline unsigned rb_page_commit(struct buffer_page *bpage) | 
 | 1456 | { | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1457 | 	return local_read(&bpage->page->commit); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1458 | } | 
 | 1459 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1460 | static inline unsigned long rb_page_entries(struct buffer_page *bpage) | 
 | 1461 | { | 
 | 1462 | 	return local_read(&bpage->entries) & RB_WRITE_MASK; | 
 | 1463 | } | 
 | 1464 |  | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1465 | /* Size is determined by what has been committed */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1466 | static inline unsigned rb_page_size(struct buffer_page *bpage) | 
 | 1467 | { | 
 | 1468 | 	return rb_page_commit(bpage); | 
 | 1469 | } | 
 | 1470 |  | 
 | 1471 | static inline unsigned | 
 | 1472 | rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) | 
 | 1473 | { | 
 | 1474 | 	return rb_page_commit(cpu_buffer->commit_page); | 
 | 1475 | } | 
 | 1476 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1477 | static inline unsigned | 
 | 1478 | rb_event_index(struct ring_buffer_event *event) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1479 | { | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1480 | 	unsigned long addr = (unsigned long)event; | 
 | 1481 |  | 
| Steven Rostedt | 22f470f | 2009-06-11 09:29:58 -0400 | [diff] [blame] | 1482 | 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1483 | } | 
 | 1484 |  | 
| Steven Rostedt | 0f0c85f | 2009-05-11 16:08:00 -0400 | [diff] [blame] | 1485 | static inline int | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 1486 | rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 1487 | 		   struct ring_buffer_event *event) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1488 | { | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1489 | 	unsigned long addr = (unsigned long)event; | 
 | 1490 | 	unsigned long index; | 
 | 1491 |  | 
 | 1492 | 	index = rb_event_index(event); | 
 | 1493 | 	addr &= PAGE_MASK; | 
 | 1494 |  | 
 | 1495 | 	return cpu_buffer->commit_page->page == (void *)addr && | 
 | 1496 | 		rb_commit_index(cpu_buffer) == index; | 
 | 1497 | } | 
 | 1498 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1499 | static void | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1500 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | 
 | 1501 | { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1502 | 	unsigned long max_count; | 
 | 1503 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1504 | 	/* | 
 | 1505 | 	 * We only race with interrupts and NMIs on this CPU. | 
 | 1506 | 	 * If we own the commit event, then we can commit | 
 | 1507 | 	 * all others that interrupted us, since the interruptions | 
 | 1508 | 	 * are in stack format (they finish before they come | 
 | 1509 | 	 * back to us). This allows us to do a simple loop to | 
 | 1510 | 	 * assign the commit to the tail. | 
 | 1511 | 	 */ | 
| Steven Rostedt | a8ccf1d | 2008-12-23 11:32:24 -0500 | [diff] [blame] | 1512 |  again: | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1513 | 	max_count = cpu_buffer->buffer->pages * 100; | 
 | 1514 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1515 | 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1516 | 		if (RB_WARN_ON(cpu_buffer, !(--max_count))) | 
 | 1517 | 			return; | 
 | 1518 | 		if (RB_WARN_ON(cpu_buffer, | 
 | 1519 | 			       rb_is_reader_page(cpu_buffer->tail_page))) | 
 | 1520 | 			return; | 
 | 1521 | 		local_set(&cpu_buffer->commit_page->page->commit, | 
 | 1522 | 			  rb_page_write(cpu_buffer->commit_page)); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1523 | 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1524 | 		cpu_buffer->write_stamp = | 
 | 1525 | 			cpu_buffer->commit_page->page->time_stamp; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1526 | 		/* add barrier to keep gcc from optimizing too much */ | 
 | 1527 | 		barrier(); | 
 | 1528 | 	} | 
 | 1529 | 	while (rb_commit_index(cpu_buffer) != | 
 | 1530 | 	       rb_page_write(cpu_buffer->commit_page)) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1531 |  | 
 | 1532 | 		local_set(&cpu_buffer->commit_page->page->commit, | 
 | 1533 | 			  rb_page_write(cpu_buffer->commit_page)); | 
 | 1534 | 		RB_WARN_ON(cpu_buffer, | 
 | 1535 | 			   local_read(&cpu_buffer->commit_page->page->commit) & | 
 | 1536 | 			   ~RB_WRITE_MASK); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1537 | 		barrier(); | 
 | 1538 | 	} | 
| Steven Rostedt | a8ccf1d | 2008-12-23 11:32:24 -0500 | [diff] [blame] | 1539 |  | 
 | 1540 | 	/* again, keep gcc from optimizing */ | 
 | 1541 | 	barrier(); | 
 | 1542 |  | 
 | 1543 | 	/* | 
 | 1544 | 	 * If an interrupt came in just after the first while loop | 
 | 1545 | 	 * and pushed the tail page forward, we will be left with | 
 | 1546 | 	 * a dangling commit that will never go forward. | 
 | 1547 | 	 */ | 
 | 1548 | 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) | 
 | 1549 | 		goto again; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1550 | } | 
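/*
 * Nesting example (descriptive only): if a writer reserves an event and is
 * then interrupted, the interrupt reserves and finishes its own events
 * further along the buffer, but the commit page is not advanced on its
 * behalf.  When the original, outermost writer completes, the loops above
 * walk forward and publish every nested event in order.
 */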
 | 1551 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1552 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1553 | { | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1554 | 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1555 | 	cpu_buffer->reader_page->read = 0; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1556 | } | 
 | 1557 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1558 | static void rb_inc_iter(struct ring_buffer_iter *iter) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1559 | { | 
 | 1560 | 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
 | 1561 |  | 
 | 1562 | 	/* | 
 | 1563 | 	 * The iterator could be on the reader page (it starts there). | 
 | 1564 | 	 * But the head could have moved, since the reader was | 
 | 1565 | 	 * found. Check for this case and assign the iterator | 
 | 1566 | 	 * to the head page instead of next. | 
 | 1567 | 	 */ | 
 | 1568 | 	if (iter->head_page == cpu_buffer->reader_page) | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1569 | 		iter->head_page = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1570 | 	else | 
 | 1571 | 		rb_inc_page(cpu_buffer, &iter->head_page); | 
 | 1572 |  | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1573 | 	iter->read_stamp = iter->head_page->page->time_stamp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1574 | 	iter->head = 0; | 
 | 1575 | } | 
 | 1576 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1577 | /* Slow path, do not inline */ | 
 | 1578 | static noinline struct ring_buffer_event * | 
 | 1579 | rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) | 
 | 1580 | { | 
 | 1581 | 	event->type_len = RINGBUF_TYPE_TIME_EXTEND; | 
 | 1582 |  | 
 | 1583 | 	/* Not the first event on the page? */ | 
 | 1584 | 	if (rb_event_index(event)) { | 
 | 1585 | 		event->time_delta = delta & TS_MASK; | 
 | 1586 | 		event->array[0] = delta >> TS_SHIFT; | 
 | 1587 | 	} else { | 
 | 1588 | 		/* nope, just zero it */ | 
 | 1589 | 		event->time_delta = 0; | 
 | 1590 | 		event->array[0] = 0; | 
 | 1591 | 	} | 
 | 1592 |  | 
 | 1593 | 	return skip_time_extend(event); | 
 | 1594 | } | 
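/*
 * Worked example (assuming TS_SHIFT == 27, as defined earlier in this
 * file): a delta of 0x12345678 does not fit in the 27-bit time_delta
 * field, so the time extend event stores
 *
 *	time_delta = 0x12345678 & TS_MASK  = 0x02345678
 *	array[0]   = 0x12345678 >> 27      = 0x2
 *
 * and the reader reconstructs the delta as
 * (array[0] << TS_SHIFT) + time_delta.
 */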
 | 1595 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1596 | /** | 
 | 1597 |  * rb_update_event - update event type and data | 
 | 1598 |  * @cpu_buffer: the per cpu buffer the event belongs to | 
 | 1599 |  * @event: the event to update | 
 | 1600 |  * @length: the size of the event field in the ring buffer | 
 | 1601 |  * | 
 | 1602 |  * Update the type and data fields of the event. The length | 
 | 1603 |  * is the actual size that is written to the ring buffer, | 
 | 1604 |  * and with this, we can determine what to place into the | 
 | 1605 |  * data field. | 
 | 1606 |  */ | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1607 | static void | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1608 | rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 1609 | 		struct ring_buffer_event *event, unsigned length, | 
 | 1610 | 		int add_timestamp, u64 delta) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1611 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1612 | 	/* Only a commit updates the timestamp */ | 
 | 1613 | 	if (unlikely(!rb_event_is_commit(cpu_buffer, event))) | 
 | 1614 | 		delta = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1615 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1616 | 	/* | 
 | 1617 | 	 * If we need to add a timestamp, then we | 
 | 1618 | 	 * add it to the start of the reserved space. | 
 | 1619 | 	 */ | 
 | 1620 | 	if (unlikely(add_timestamp)) { | 
 | 1621 | 		event = rb_add_time_stamp(event, delta); | 
 | 1622 | 		length -= RB_LEN_TIME_EXTEND; | 
 | 1623 | 		delta = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1624 | 	} | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1625 |  | 
 | 1626 | 	event->time_delta = delta; | 
 | 1627 | 	length -= RB_EVNT_HDR_SIZE; | 
 | 1628 | 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { | 
 | 1629 | 		event->type_len = 0; | 
 | 1630 | 		event->array[0] = length; | 
 | 1631 | 	} else | 
 | 1632 | 		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1633 | } | 
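/*
 * Encoding sketch (assuming 4-byte RB_ALIGNMENT and no forced 8-byte
 * alignment): a 12-byte payload is stored with
 *
 *	event->type_len = DIV_ROUND_UP(12, 4) = 3
 *
 * and the reader derives the data size as type_len * RB_ALIGNMENT.
 * A payload larger than RB_MAX_SMALL_DATA sets type_len to 0 instead and
 * records the exact length in array[0], costing four extra bytes.
 */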
 | 1634 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1635 | /* | 
 | 1636 |  * rb_handle_head_page - writer hit the head page | 
 | 1637 |  * | 
 | 1638 |  * Returns: +1 to retry page | 
 | 1639 |  *           0 to continue | 
 | 1640 |  *          -1 on error | 
 | 1641 |  */ | 
 | 1642 | static int | 
 | 1643 | rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 1644 | 		    struct buffer_page *tail_page, | 
 | 1645 | 		    struct buffer_page *next_page) | 
 | 1646 | { | 
 | 1647 | 	struct buffer_page *new_head; | 
 | 1648 | 	int entries; | 
 | 1649 | 	int type; | 
 | 1650 | 	int ret; | 
 | 1651 |  | 
 | 1652 | 	entries = rb_page_entries(next_page); | 
 | 1653 |  | 
 | 1654 | 	/* | 
 | 1655 | 	 * The hard part is here. We need to move the head | 
 | 1656 | 	 * forward, and protect against both readers on | 
 | 1657 | 	 * other CPUs and writers coming in via interrupts. | 
 | 1658 | 	 */ | 
 | 1659 | 	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, | 
 | 1660 | 				       RB_PAGE_HEAD); | 
 | 1661 |  | 
 | 1662 | 	/* | 
 | 1663 | 	 * type can be one of four: | 
 | 1664 | 	 *  NORMAL - an interrupt already moved it for us | 
 | 1665 | 	 *  HEAD   - we are the first to get here. | 
 | 1666 | 	 *  UPDATE - we are the interrupt interrupting | 
 | 1667 | 	 *           a current move. | 
 | 1668 | 	 *  MOVED  - a reader on another CPU moved the next | 
 | 1669 | 	 *           pointer to its reader page. Give up | 
 | 1670 | 	 *           and try again. | 
 | 1671 | 	 */ | 
 | 1672 |  | 
 | 1673 | 	switch (type) { | 
 | 1674 | 	case RB_PAGE_HEAD: | 
 | 1675 | 		/* | 
 | 1676 | 		 * We changed the head to UPDATE, thus | 
 | 1677 | 		 * it is our responsibility to update | 
 | 1678 | 		 * the counters. | 
 | 1679 | 		 */ | 
 | 1680 | 		local_add(entries, &cpu_buffer->overrun); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 1681 | 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1682 |  | 
 | 1683 | 		/* | 
 | 1684 | 		 * The entries will be zeroed out when we move the | 
 | 1685 | 		 * tail page. | 
 | 1686 | 		 */ | 
 | 1687 |  | 
 | 1688 | 		/* still more to do */ | 
 | 1689 | 		break; | 
 | 1690 |  | 
 | 1691 | 	case RB_PAGE_UPDATE: | 
 | 1692 | 		/* | 
 | 1693 | 		 * This is an interrupt that interrupted the | 
 | 1694 | 		 * previous update. Still more to do. | 
 | 1695 | 		 */ | 
 | 1696 | 		break; | 
 | 1697 | 	case RB_PAGE_NORMAL: | 
 | 1698 | 		/* | 
 | 1699 | 		 * An interrupt came in before the update | 
 | 1700 | 		 * and processed this for us. | 
 | 1701 | 		 * Nothing left to do. | 
 | 1702 | 		 */ | 
 | 1703 | 		return 1; | 
 | 1704 | 	case RB_PAGE_MOVED: | 
 | 1705 | 		/* | 
 | 1706 | 		 * The reader is on another CPU and just did | 
 | 1707 | 		 * a swap with our next_page. | 
 | 1708 | 		 * Try again. | 
 | 1709 | 		 */ | 
 | 1710 | 		return 1; | 
 | 1711 | 	default: | 
 | 1712 | 		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ | 
 | 1713 | 		return -1; | 
 | 1714 | 	} | 
 | 1715 |  | 
 | 1716 | 	/* | 
 | 1717 | 	 * Now that we are here, the old head pointer is | 
 | 1718 | 	 * set to UPDATE. This will keep the reader from | 
 | 1719 | 	 * swapping the head page with the reader page. | 
 | 1720 | 	 * The reader (on another CPU) will spin till | 
 | 1721 | 	 * we are finished. | 
 | 1722 | 	 * | 
 | 1723 | 	 * We just need to protect against interrupts | 
 | 1724 | 	 * doing the job. We will set the next pointer | 
 | 1725 | 	 * to HEAD. After that, we set the old pointer | 
 | 1726 | 	 * to NORMAL, but only if it was HEAD before. | 
 | 1727 | 	 * Otherwise we are an interrupt, and only | 
 | 1728 | 	 * want the outermost commit to reset it. | 
 | 1729 | 	 */ | 
 | 1730 | 	new_head = next_page; | 
 | 1731 | 	rb_inc_page(cpu_buffer, &new_head); | 
 | 1732 |  | 
 | 1733 | 	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, | 
 | 1734 | 				    RB_PAGE_NORMAL); | 
 | 1735 |  | 
 | 1736 | 	/* | 
 | 1737 | 	 * Valid returns are: | 
 | 1738 | 	 *  HEAD   - an interrupt came in and already set it. | 
 | 1739 | 	 *  NORMAL - One of two things: | 
 | 1740 | 	 *            1) We really set it. | 
 | 1741 | 	 *            2) A bunch of interrupts came in and moved | 
 | 1742 | 	 *               the page forward again. | 
 | 1743 | 	 */ | 
 | 1744 | 	switch (ret) { | 
 | 1745 | 	case RB_PAGE_HEAD: | 
 | 1746 | 	case RB_PAGE_NORMAL: | 
 | 1747 | 		/* OK */ | 
 | 1748 | 		break; | 
 | 1749 | 	default: | 
 | 1750 | 		RB_WARN_ON(cpu_buffer, 1); | 
 | 1751 | 		return -1; | 
 | 1752 | 	} | 
 | 1753 |  | 
 | 1754 | 	/* | 
 | 1755 | 	 * It is possible that an interrupt came in, | 
 | 1756 | 	 * set the head up, then more interrupts came in | 
 | 1757 | 	 * and moved it again. When we get back here, | 
 | 1758 | 	 * the page would have been set to NORMAL but we | 
 | 1759 | 	 * just set it back to HEAD. | 
 | 1760 | 	 * | 
 | 1761 | 	 * How do you detect this? Well, if that happened | 
 | 1762 | 	 * the tail page would have moved. | 
 | 1763 | 	 */ | 
 | 1764 | 	if (ret == RB_PAGE_NORMAL) { | 
 | 1765 | 		/* | 
 | 1766 | 		 * If the tail had moved past next, then we need | 
 | 1767 | 		 * to reset the pointer. | 
 | 1768 | 		 */ | 
 | 1769 | 		if (cpu_buffer->tail_page != tail_page && | 
 | 1770 | 		    cpu_buffer->tail_page != next_page) | 
 | 1771 | 			rb_head_page_set_normal(cpu_buffer, new_head, | 
 | 1772 | 						next_page, | 
 | 1773 | 						RB_PAGE_HEAD); | 
 | 1774 | 	} | 
 | 1775 |  | 
 | 1776 | 	/* | 
 | 1777 | 	 * If this was the outermost commit (the one that | 
 | 1778 | 	 * changed the original pointer from HEAD to UPDATE), | 
 | 1779 | 	 * then it is up to us to reset it to NORMAL. | 
 | 1780 | 	 */ | 
 | 1781 | 	if (type == RB_PAGE_HEAD) { | 
 | 1782 | 		ret = rb_head_page_set_normal(cpu_buffer, next_page, | 
 | 1783 | 					      tail_page, | 
 | 1784 | 					      RB_PAGE_UPDATE); | 
 | 1785 | 		if (RB_WARN_ON(cpu_buffer, | 
 | 1786 | 			       ret != RB_PAGE_UPDATE)) | 
 | 1787 | 			return -1; | 
 | 1788 | 	} | 
 | 1789 |  | 
 | 1790 | 	return 0; | 
 | 1791 | } | 
 | 1792 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1793 | static unsigned rb_calculate_event_length(unsigned length) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1794 | { | 
 | 1795 | 	struct ring_buffer_event event; /* Used only for sizeof array */ | 
 | 1796 |  | 
 | 1797 | 	/* zero length can cause confusions */ | 
 | 1798 | 	if (!length) | 
 | 1799 | 		length = 1; | 
 | 1800 |  | 
| Steven Rostedt | 2271048 | 2010-03-18 17:54:19 -0400 | [diff] [blame] | 1801 | 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1802 | 		length += sizeof(event.array[0]); | 
 | 1803 |  | 
 | 1804 | 	length += RB_EVNT_HDR_SIZE; | 
| Steven Rostedt | 2271048 | 2010-03-18 17:54:19 -0400 | [diff] [blame] | 1805 | 	length = ALIGN(length, RB_ARCH_ALIGNMENT); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1806 |  | 
 | 1807 | 	return length; | 
 | 1808 | } | 
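/*
 * Worked example (assuming a 4-byte event header and 4-byte
 * RB_ARCH_ALIGNMENT): a 10-byte payload reserves 10 + 4 = 14 bytes,
 * rounded up to 16.  A 200-byte payload exceeds RB_MAX_SMALL_DATA and also
 * pays for the explicit length word: 200 + 4 + 4 = 208 bytes, which is
 * already aligned.
 */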
 | 1809 |  | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 1810 | static inline void | 
 | 1811 | rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 1812 | 	      struct buffer_page *tail_page, | 
 | 1813 | 	      unsigned long tail, unsigned long length) | 
 | 1814 | { | 
 | 1815 | 	struct ring_buffer_event *event; | 
 | 1816 |  | 
 | 1817 | 	/* | 
 | 1818 | 	 * Only the event that crossed the page boundary | 
 | 1819 | 	 * must fill the old tail_page with padding. | 
 | 1820 | 	 */ | 
 | 1821 | 	if (tail >= BUF_PAGE_SIZE) { | 
| Steven Rostedt | b3230c8 | 2010-05-21 11:55:21 -0400 | [diff] [blame] | 1822 | 		/* | 
 | 1823 | 		 * If the page was filled, then we still need | 
 | 1824 | 		 * to update the real_end. Reset it to zero | 
 | 1825 | 		 * and the reader will ignore it. | 
 | 1826 | 		 */ | 
 | 1827 | 		if (tail == BUF_PAGE_SIZE) | 
 | 1828 | 			tail_page->real_end = 0; | 
 | 1829 |  | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 1830 | 		local_sub(length, &tail_page->write); | 
 | 1831 | 		return; | 
 | 1832 | 	} | 
 | 1833 |  | 
 | 1834 | 	event = __rb_page_index(tail_page, tail); | 
| Linus Torvalds | b0b7065 | 2009-06-20 10:56:46 -0700 | [diff] [blame] | 1835 | 	kmemcheck_annotate_bitfield(event, bitfield); | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 1836 |  | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 1837 | 	/* account for padding bytes */ | 
 | 1838 | 	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); | 
 | 1839 |  | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 1840 | 	/* | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 1841 | 	 * Save the original length to the meta data. | 
 | 1842 | 	 * This will be used by the reader to compute the | 
 | 1843 | 	 * lost event count. | 
 | 1844 | 	 */ | 
 | 1845 | 	tail_page->real_end = tail; | 
 | 1846 |  | 
 | 1847 | 	/* | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 1848 | 	 * If this event is bigger than the minimum size, then | 
 | 1849 | 	 * we need to be careful that we don't subtract the | 
 | 1850 | 	 * write counter enough to allow another writer to slip | 
 | 1851 | 	 * in on this page. | 
 | 1852 | 	 * We put in a discarded commit instead, to make sure | 
 | 1853 | 	 * that this space is not used again. | 
 | 1854 | 	 * | 
 | 1855 | 	 * If we are less than the minimum size, we don't need to | 
 | 1856 | 	 * worry about it. | 
 | 1857 | 	 */ | 
 | 1858 | 	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { | 
 | 1859 | 		/* No room for any events */ | 
 | 1860 |  | 
 | 1861 | 		/* Mark the rest of the page with padding */ | 
 | 1862 | 		rb_event_set_padding(event); | 
 | 1863 |  | 
 | 1864 | 		/* Set the write back to the previous setting */ | 
 | 1865 | 		local_sub(length, &tail_page->write); | 
 | 1866 | 		return; | 
 | 1867 | 	} | 
 | 1868 |  | 
 | 1869 | 	/* Put in a discarded event */ | 
 | 1870 | 	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; | 
 | 1871 | 	event->type_len = RINGBUF_TYPE_PADDING; | 
 | 1872 | 	/* time delta must be non-zero */ | 
 | 1873 | 	event->time_delta = 1; | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 1874 |  | 
 | 1875 | 	/* Set write to end of buffer */ | 
 | 1876 | 	length = (tail + length) - BUF_PAGE_SIZE; | 
 | 1877 | 	local_sub(length, &tail_page->write); | 
 | 1878 | } | 
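/*
 * Padding sketch (assuming BUF_PAGE_SIZE of about 4080 on 4K pages): if an
 * event reservation starting at tail == 4000 spills past the end of the
 * page, the remaining 80 bytes become a single PADDING event whose array[0]
 * records the padded data size (80 minus the event header), so readers skip
 * it rather than interpreting stale data.
 */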
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 1879 |  | 
| Steven Rostedt | 747e94a | 2010-10-08 13:51:48 -0400 | [diff] [blame] | 1880 | /* | 
 | 1881 |  * This is the slow path, force gcc not to inline it. | 
 | 1882 |  */ | 
 | 1883 | static noinline struct ring_buffer_event * | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 1884 | rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 1885 | 	     unsigned long length, unsigned long tail, | 
| Steven Rostedt | e8bc43e | 2010-10-20 10:58:02 -0400 | [diff] [blame] | 1886 | 	     struct buffer_page *tail_page, u64 ts) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1887 | { | 
| Steven Rostedt | 5a50e33 | 2009-11-17 08:43:01 -0500 | [diff] [blame] | 1888 | 	struct buffer_page *commit_page = cpu_buffer->commit_page; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1889 | 	struct ring_buffer *buffer = cpu_buffer->buffer; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1890 | 	struct buffer_page *next_page; | 
 | 1891 | 	int ret; | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1892 |  | 
 | 1893 | 	next_page = tail_page; | 
 | 1894 |  | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1895 | 	rb_inc_page(cpu_buffer, &next_page); | 
 | 1896 |  | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1897 | 	/* | 
 | 1898 | 	 * If for some reason, we had an interrupt storm that made | 
 | 1899 | 	 * it all the way around the buffer, bail, and warn | 
 | 1900 | 	 * about it. | 
 | 1901 | 	 */ | 
 | 1902 | 	if (unlikely(next_page == commit_page)) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1903 | 		local_inc(&cpu_buffer->commit_overrun); | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1904 | 		goto out_reset; | 
 | 1905 | 	} | 
 | 1906 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1907 | 	/* | 
 | 1908 | 	 * This is where the fun begins! | 
 | 1909 | 	 * | 
 | 1910 | 	 * We are fighting against races between a reader that | 
 | 1911 | 	 * could be on another CPU trying to swap its reader | 
 | 1912 | 	 * page with the buffer head. | 
 | 1913 | 	 * | 
 | 1914 | 	 * We are also fighting against interrupts coming in and | 
 | 1915 | 	 * moving the head or tail on us as well. | 
 | 1916 | 	 * | 
 | 1917 | 	 * If the next page is the head page then we have filled | 
 | 1918 | 	 * the buffer, unless the commit page is still on the | 
 | 1919 | 	 * reader page. | 
 | 1920 | 	 */ | 
 | 1921 | 	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1922 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1923 | 		/* | 
 | 1924 | 		 * If the commit is not on the reader page, then | 
 | 1925 | 		 * move the head page. | 
 | 1926 | 		 */ | 
 | 1927 | 		if (!rb_is_reader_page(cpu_buffer->commit_page)) { | 
 | 1928 | 			/* | 
 | 1929 | 			 * If we are not in overwrite mode, | 
 | 1930 | 			 * this is easy, just stop here. | 
 | 1931 | 			 */ | 
 | 1932 | 			if (!(buffer->flags & RB_FL_OVERWRITE)) | 
 | 1933 | 				goto out_reset; | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1934 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1935 | 			ret = rb_handle_head_page(cpu_buffer, | 
 | 1936 | 						  tail_page, | 
 | 1937 | 						  next_page); | 
 | 1938 | 			if (ret < 0) | 
 | 1939 | 				goto out_reset; | 
 | 1940 | 			if (ret) | 
 | 1941 | 				goto out_again; | 
 | 1942 | 		} else { | 
 | 1943 | 			/* | 
 | 1944 | 			 * We need to be careful here too. The | 
 | 1945 | 			 * commit page could still be on the reader | 
 | 1946 | 			 * page. We could have a small buffer, and | 
 | 1947 | 			 * have filled up the buffer with events | 
 | 1948 | 			 * from interrupts and such, and wrapped. | 
 | 1949 | 			 * | 
 | 1950 | 			 * Note, if the tail page is also on the | 
 | 1951 | 			 * reader_page, we let it move out. | 
 | 1952 | 			 */ | 
 | 1953 | 			if (unlikely((cpu_buffer->commit_page != | 
 | 1954 | 				      cpu_buffer->tail_page) && | 
 | 1955 | 				     (cpu_buffer->commit_page == | 
 | 1956 | 				      cpu_buffer->reader_page))) { | 
 | 1957 | 				local_inc(&cpu_buffer->commit_overrun); | 
 | 1958 | 				goto out_reset; | 
 | 1959 | 			} | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1960 | 		} | 
 | 1961 | 	} | 
 | 1962 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1963 | 	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); | 
 | 1964 | 	if (ret) { | 
 | 1965 | 		/* | 
 | 1966 | 		 * Nested commits always have zero deltas, so | 
 | 1967 | 		 * just reread the time stamp | 
 | 1968 | 		 */ | 
| Steven Rostedt | e8bc43e | 2010-10-20 10:58:02 -0400 | [diff] [blame] | 1969 | 		ts = rb_time_stamp(buffer); | 
 | 1970 | 		next_page->page->time_stamp = ts; | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1971 | 	} | 
 | 1972 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1973 |  out_again: | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1974 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1975 | 	rb_reset_tail(cpu_buffer, tail_page, tail, length); | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 1976 |  | 
 | 1977 | 	/* fail and let the caller try again */ | 
 | 1978 | 	return ERR_PTR(-EAGAIN); | 
 | 1979 |  | 
| Steven Rostedt | 45141d4 | 2009-02-12 13:19:48 -0500 | [diff] [blame] | 1980 |  out_reset: | 
| Lai Jiangshan | 6f3b344 | 2009-01-12 11:06:18 +0800 | [diff] [blame] | 1981 | 	/* reset write */ | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 1982 | 	rb_reset_tail(cpu_buffer, tail_page, tail, length); | 
| Lai Jiangshan | 6f3b344 | 2009-01-12 11:06:18 +0800 | [diff] [blame] | 1983 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1984 | 	return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1985 | } | 
 | 1986 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 1987 | static struct ring_buffer_event * | 
 | 1988 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1989 | 		  unsigned long length, u64 ts, | 
 | 1990 | 		  u64 delta, int add_timestamp) | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 1991 | { | 
| Steven Rostedt | 5a50e33 | 2009-11-17 08:43:01 -0500 | [diff] [blame] | 1992 | 	struct buffer_page *tail_page; | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 1993 | 	struct ring_buffer_event *event; | 
 | 1994 | 	unsigned long tail, write; | 
 | 1995 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1996 | 	/* | 
 | 1997 | 	 * If the time delta since the last event is too big to | 
 | 1998 | 	 * hold in the time field of the event, then we append a | 
 | 1999 | 	 * TIME EXTEND event ahead of the data event. | 
 | 2000 | 	 */ | 
 | 2001 | 	if (unlikely(add_timestamp)) | 
 | 2002 | 		length += RB_LEN_TIME_EXTEND; | 
 | 2003 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2004 | 	tail_page = cpu_buffer->tail_page; | 
 | 2005 | 	write = local_add_return(length, &tail_page->write); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2006 |  | 
 | 2007 | 	/* set write to only the index of the write */ | 
 | 2008 | 	write &= RB_WRITE_MASK; | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2009 | 	tail = write - length; | 
 | 2010 |  | 
| Steven Rostedt (Red Hat) | 17ff13b | 2014-02-11 13:38:54 -0500 | [diff] [blame] | 2011 | 	/* | 
 | 2012 | 	 * If this is the first commit on the page, then it has the same | 
 | 2013 | 	 * timestamp as the page itself. | 
 | 2014 | 	 */ | 
 | 2015 | 	if (!tail) | 
 | 2016 | 		delta = 0; | 
 | 2017 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2018 | 	/* See if we shot past the end of this buffer page */ | 
| Steven Rostedt | 747e94a | 2010-10-08 13:51:48 -0400 | [diff] [blame] | 2019 | 	if (unlikely(write > BUF_PAGE_SIZE)) | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2020 | 		return rb_move_tail(cpu_buffer, length, tail, | 
| Steven Rostedt | 5a50e33 | 2009-11-17 08:43:01 -0500 | [diff] [blame] | 2021 | 				    tail_page, ts); | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2022 |  | 
 | 2023 | 	/* We reserved something on the buffer */ | 
 | 2024 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2025 | 	event = __rb_page_index(tail_page, tail); | 
| Vegard Nossum | 1744a21 | 2009-02-28 08:29:44 +0100 | [diff] [blame] | 2026 | 	kmemcheck_annotate_bitfield(event, bitfield); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2027 | 	rb_update_event(cpu_buffer, event, length, add_timestamp, delta); | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2028 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2029 | 	local_inc(&tail_page->entries); | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2030 |  | 
 | 2031 | 	/* | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2032 | 	 * If this is the first commit on the page, then update | 
 | 2033 | 	 * its timestamp. | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2034 | 	 */ | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2035 | 	if (!tail) | 
| Steven Rostedt | e8bc43e | 2010-10-20 10:58:02 -0400 | [diff] [blame] | 2036 | 		tail_page->page->time_stamp = ts; | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2037 |  | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2038 | 	/* account for these added bytes */ | 
 | 2039 | 	local_add(length, &cpu_buffer->entries_bytes); | 
 | 2040 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2041 | 	return event; | 
 | 2042 | } | 
 | 2043 |  | 
| Steven Rostedt | edd813b | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2044 | static inline int | 
 | 2045 | rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 2046 | 		  struct ring_buffer_event *event) | 
 | 2047 | { | 
 | 2048 | 	unsigned long new_index, old_index; | 
 | 2049 | 	struct buffer_page *bpage; | 
 | 2050 | 	unsigned long index; | 
 | 2051 | 	unsigned long addr; | 
 | 2052 |  | 
 | 2053 | 	new_index = rb_event_index(event); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2054 | 	old_index = new_index + rb_event_ts_length(event); | 
| Steven Rostedt | edd813b | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2055 | 	addr = (unsigned long)event; | 
 | 2056 | 	addr &= PAGE_MASK; | 
 | 2057 |  | 
 | 2058 | 	bpage = cpu_buffer->tail_page; | 
 | 2059 |  | 
 | 2060 | 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2061 | 		unsigned long write_mask = | 
 | 2062 | 			local_read(&bpage->write) & ~RB_WRITE_MASK; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2063 | 		unsigned long event_length = rb_event_length(event); | 
| Steven Rostedt | edd813b | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2064 | 		/* | 
 | 2065 | 		 * This is on the tail page. It is possible that | 
 | 2066 | 		 * a write could come in and move the tail page | 
 | 2067 | 		 * and write to the next page. That is fine | 
 | 2068 | 		 * because we just shorten what is on this page. | 
 | 2069 | 		 */ | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2070 | 		old_index += write_mask; | 
 | 2071 | 		new_index += write_mask; | 
| Steven Rostedt | edd813b | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2072 | 		index = local_cmpxchg(&bpage->write, old_index, new_index); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2073 | 		if (index == old_index) { | 
 | 2074 | 			/* update counters */ | 
 | 2075 | 			local_sub(event_length, &cpu_buffer->entries_bytes); | 
| Steven Rostedt | edd813b | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2076 | 			return 1; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2077 | 		} | 
| Steven Rostedt | edd813b | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2078 | 	} | 
 | 2079 |  | 
 | 2080 | 	/* could not discard */ | 
 | 2081 | 	return 0; | 
 | 2082 | } | 
 | 2083 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2084 | static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) | 
 | 2085 | { | 
 | 2086 | 	local_inc(&cpu_buffer->committing); | 
 | 2087 | 	local_inc(&cpu_buffer->commits); | 
 | 2088 | } | 
 | 2089 |  | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2090 | static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2091 | { | 
 | 2092 | 	unsigned long commits; | 
 | 2093 |  | 
 | 2094 | 	if (RB_WARN_ON(cpu_buffer, | 
 | 2095 | 		       !local_read(&cpu_buffer->committing))) | 
 | 2096 | 		return; | 
 | 2097 |  | 
 | 2098 |  again: | 
 | 2099 | 	commits = local_read(&cpu_buffer->commits); | 
 | 2100 | 	/* synchronize with interrupts */ | 
 | 2101 | 	barrier(); | 
 | 2102 | 	if (local_read(&cpu_buffer->committing) == 1) | 
 | 2103 | 		rb_set_commit_to_write(cpu_buffer); | 
 | 2104 |  | 
 | 2105 | 	local_dec(&cpu_buffer->committing); | 
 | 2106 |  | 
 | 2107 | 	/* synchronize with interrupts */ | 
 | 2108 | 	barrier(); | 
 | 2109 |  | 
 | 2110 | 	/* | 
 | 2111 | 	 * Need to account for interrupts coming in between the | 
 | 2112 | 	 * updating of the commit page and the clearing of the | 
 | 2113 | 	 * committing counter. | 
 | 2114 | 	 */ | 
 | 2115 | 	if (unlikely(local_read(&cpu_buffer->commits) != commits) && | 
 | 2116 | 	    !local_read(&cpu_buffer->committing)) { | 
 | 2117 | 		local_inc(&cpu_buffer->committing); | 
 | 2118 | 		goto again; | 
 | 2119 | 	} | 
 | 2120 | } | 
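/*
 * Illustrative note (not in the original source): rb_start_commit() and
 * rb_end_commit() bracket every write. A nested writer (an interrupt that
 * traces while "committing" is already non-zero) bumps the counters but
 * leaves the commit page alone; only the outermost writer, the one that
 * sees committing == 1 on its way out, calls rb_set_commit_to_write().
 * The re-read of "commits" catches writes that slipped in between the
 * commit page update and the clearing of the committing counter.
 */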
 | 2121 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2122 | static struct ring_buffer_event * | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2123 | rb_reserve_next_event(struct ring_buffer *buffer, | 
 | 2124 | 		      struct ring_buffer_per_cpu *cpu_buffer, | 
| Steven Rostedt | 1cd8d73 | 2009-05-11 14:08:09 -0400 | [diff] [blame] | 2125 | 		      unsigned long length) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2126 | { | 
 | 2127 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2128 | 	u64 ts, delta; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 2129 | 	int nr_loops = 0; | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2130 | 	int add_timestamp; | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2131 | 	u64 diff; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2132 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2133 | 	rb_start_commit(cpu_buffer); | 
 | 2134 |  | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 2135 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2136 | 	/* | 
 | 2137 | 	 * Due to the ability to swap a cpu buffer out of a buffer, | 
 | 2138 | 	 * it is possible it was swapped before we committed | 
 | 2139 | 	 * (committing stops a swap). We check for that here and, | 
 | 2140 | 	 * if it happened, we have to fail the write. | 
 | 2141 | 	 */ | 
 | 2142 | 	barrier(); | 
 | 2143 | 	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { | 
 | 2144 | 		local_dec(&cpu_buffer->committing); | 
 | 2145 | 		local_dec(&cpu_buffer->commits); | 
 | 2146 | 		return NULL; | 
 | 2147 | 	} | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 2148 | #endif | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2149 |  | 
| Steven Rostedt | be957c4 | 2009-05-11 14:42:53 -0400 | [diff] [blame] | 2150 | 	length = rb_calculate_event_length(length); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2151 |  again: | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2152 | 	add_timestamp = 0; | 
 | 2153 | 	delta = 0; | 
 | 2154 |  | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 2155 | 	/* | 
 | 2156 | 	 * We allow for interrupts to reenter here and do a trace. | 
 | 2157 | 	 * If one does, it will cause this original code to loop | 
 | 2158 | 	 * back here. Even with heavy interrupts happening, this | 
 | 2159 | 	 * should only happen a few times in a row. If this happens | 
 | 2160 | 	 * 1000 times in a row, there must be either an interrupt | 
 | 2161 | 	 * storm or we have something buggy. | 
 | 2162 | 	 * Bail! | 
 | 2163 | 	 */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 2164 | 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2165 | 		goto out_fail; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 2166 |  | 
| Jiri Olsa | 6d3f1e1 | 2009-10-23 19:36:19 -0400 | [diff] [blame] | 2167 | 	ts = rb_time_stamp(cpu_buffer->buffer); | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2168 | 	diff = ts - cpu_buffer->write_stamp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2169 |  | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2170 | 	/* make sure this diff is calculated here */ | 
 | 2171 | 	barrier(); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2172 |  | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2173 | 	/* Did the write stamp get updated already? */ | 
 | 2174 | 	if (likely(ts >= cpu_buffer->write_stamp)) { | 
| Steven Rostedt | 168b6b1 | 2009-05-11 22:11:05 -0400 | [diff] [blame] | 2175 | 		delta = diff; | 
 | 2176 | 		if (unlikely(test_time_stamp(delta))) { | 
| Jiri Olsa | 31274d7 | 2011-02-18 15:52:19 +0100 | [diff] [blame] | 2177 | 			int local_clock_stable = 1; | 
 | 2178 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 
 | 2179 | 			local_clock_stable = sched_clock_stable; | 
 | 2180 | #endif | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2181 | 			WARN_ONCE(delta > (1ULL << 59), | 
| Jiri Olsa | 31274d7 | 2011-02-18 15:52:19 +0100 | [diff] [blame] | 2182 | 				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s", | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2183 | 				  (unsigned long long)delta, | 
 | 2184 | 				  (unsigned long long)ts, | 
| Jiri Olsa | 31274d7 | 2011-02-18 15:52:19 +0100 | [diff] [blame] | 2185 | 				  (unsigned long long)cpu_buffer->write_stamp, | 
 | 2186 | 				  local_clock_stable ? "" : | 
 | 2187 | 				  "If you just came from a suspend/resume,\n" | 
 | 2188 | 				  "please switch to the trace global clock:\n" | 
 | 2189 | 				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n"); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2190 | 			add_timestamp = 1; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2191 | 		} | 
| Steven Rostedt | 168b6b1 | 2009-05-11 22:11:05 -0400 | [diff] [blame] | 2192 | 	} | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2193 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2194 | 	event = __rb_reserve_next(cpu_buffer, length, ts, | 
 | 2195 | 				  delta, add_timestamp); | 
| Steven Rostedt | 168b6b1 | 2009-05-11 22:11:05 -0400 | [diff] [blame] | 2196 | 	if (unlikely(PTR_ERR(event) == -EAGAIN)) | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2197 | 		goto again; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2198 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2199 | 	if (!event) | 
 | 2200 | 		goto out_fail; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2201 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2202 | 	return event; | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2203 |  | 
 | 2204 |  out_fail: | 
 | 2205 | 	rb_end_commit(cpu_buffer); | 
 | 2206 | 	return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2207 | } | 
 | 2208 |  | 
| Paul Mundt | 1155de4 | 2009-06-25 14:30:12 +0900 | [diff] [blame] | 2209 | #ifdef CONFIG_TRACING | 
 | 2210 |  | 
| Steven Rostedt | aa18efb | 2009-04-20 16:16:11 -0400 | [diff] [blame] | 2211 | #define TRACE_RECURSIVE_DEPTH 16 | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2212 |  | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2213 | /* Keep this code out of the fast path cache */ | 
 | 2214 | static noinline void trace_recursive_fail(void) | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2215 | { | 
| Steven Rostedt | aa18efb | 2009-04-20 16:16:11 -0400 | [diff] [blame] | 2216 | 	/* Disable all tracing before we do anything else */ | 
 | 2217 | 	tracing_off_permanent(); | 
| Frederic Weisbecker | e057a5e | 2009-04-19 23:38:12 +0200 | [diff] [blame] | 2218 |  | 
| Steven Rostedt | 7d7d2b8 | 2009-04-27 12:37:49 -0400 | [diff] [blame] | 2219 | 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:" | 
| Steven Rostedt | aa18efb | 2009-04-20 16:16:11 -0400 | [diff] [blame] | 2220 | 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n", | 
| Steven Rostedt | b1cff0a | 2011-05-25 14:27:43 -0400 | [diff] [blame] | 2221 | 		    trace_recursion_buffer(), | 
| Steven Rostedt | aa18efb | 2009-04-20 16:16:11 -0400 | [diff] [blame] | 2222 | 		    hardirq_count() >> HARDIRQ_SHIFT, | 
 | 2223 | 		    softirq_count() >> SOFTIRQ_SHIFT, | 
 | 2224 | 		    in_nmi()); | 
| Frederic Weisbecker | e057a5e | 2009-04-19 23:38:12 +0200 | [diff] [blame] | 2225 |  | 
| Steven Rostedt | aa18efb | 2009-04-20 16:16:11 -0400 | [diff] [blame] | 2226 | 	WARN_ON_ONCE(1); | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2227 | } | 
 | 2228 |  | 
 | 2229 | static inline int trace_recursive_lock(void) | 
 | 2230 | { | 
| Steven Rostedt | b1cff0a | 2011-05-25 14:27:43 -0400 | [diff] [blame] | 2231 | 	trace_recursion_inc(); | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2232 |  | 
| Steven Rostedt | b1cff0a | 2011-05-25 14:27:43 -0400 | [diff] [blame] | 2233 | 	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH)) | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2234 | 		return 0; | 
 | 2235 |  | 
 | 2236 | 	trace_recursive_fail(); | 
 | 2237 |  | 
| Steven Rostedt | aa18efb | 2009-04-20 16:16:11 -0400 | [diff] [blame] | 2238 | 	return -1; | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2239 | } | 
 | 2240 |  | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2241 | static inline void trace_recursive_unlock(void) | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2242 | { | 
| Steven Rostedt | b1cff0a | 2011-05-25 14:27:43 -0400 | [diff] [blame] | 2243 | 	WARN_ON_ONCE(!trace_recursion_buffer()); | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2244 |  | 
| Steven Rostedt | b1cff0a | 2011-05-25 14:27:43 -0400 | [diff] [blame] | 2245 | 	trace_recursion_dec(); | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2246 | } | 
 | 2247 |  | 
| Paul Mundt | 1155de4 | 2009-06-25 14:30:12 +0900 | [diff] [blame] | 2248 | #else | 
 | 2249 |  | 
 | 2250 | #define trace_recursive_lock()		(0) | 
 | 2251 | #define trace_recursive_unlock()	do { } while (0) | 
 | 2252 |  | 
 | 2253 | #endif | 
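/*
 * Illustrative note (not in the original source): trace_recursive_lock()
 * only bounds how deeply ring buffer reserves may nest (for example a
 * normal-context write interrupted by an irq, itself interrupted by an
 * NMI). If the depth ever reaches TRACE_RECURSIVE_DEPTH it is treated as
 * a recursion bug or an interrupt storm: tracing is shut off permanently
 * and the reserve fails.
 */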
 | 2254 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2255 | /** | 
 | 2256 |  * ring_buffer_lock_reserve - reserve a part of the buffer | 
 | 2257 |  * @buffer: the ring buffer to reserve from | 
 | 2258 |  * @length: the length of the data to reserve (excluding event header) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2259 |  * | 
 | 2260 |  * Returns a reserved event on the ring buffer to copy data directly into. | 
 | 2261 |  * The user of this interface will need to get the body to write into | 
 | 2262 |  * and can use the ring_buffer_event_data() interface. | 
 | 2263 |  * | 
 | 2264 |  * The length is the length of the data needed, not the event length | 
 | 2265 |  * which also includes the event header. | 
 | 2266 |  * | 
 | 2267 |  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. | 
 | 2268 |  * If NULL is returned, then nothing has been allocated or locked. | 
 | 2269 |  */ | 
 | 2270 | struct ring_buffer_event * | 
| Arnaldo Carvalho de Melo | 0a98775 | 2009-02-05 16:12:56 -0200 | [diff] [blame] | 2271 | ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2272 | { | 
 | 2273 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2274 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2275 | 	int cpu; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2276 |  | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 2277 | 	if (ring_buffer_flags != RB_BUFFERS_ON) | 
| Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2278 | 		return NULL; | 
 | 2279 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2280 | 	/* If we are tracing schedule, we don't want to recurse */ | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2281 | 	preempt_disable_notrace(); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2282 |  | 
| Lai Jiangshan | 52fbe9c | 2010-03-08 14:50:43 +0800 | [diff] [blame] | 2283 | 	if (atomic_read(&buffer->record_disabled)) | 
 | 2284 | 		goto out_nocheck; | 
 | 2285 |  | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2286 | 	if (trace_recursive_lock()) | 
 | 2287 | 		goto out_nocheck; | 
 | 2288 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2289 | 	cpu = raw_smp_processor_id(); | 
 | 2290 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2291 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2292 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2293 |  | 
 | 2294 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2295 |  | 
 | 2296 | 	if (atomic_read(&cpu_buffer->record_disabled)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2297 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2298 |  | 
| Steven Rostedt | be957c4 | 2009-05-11 14:42:53 -0400 | [diff] [blame] | 2299 | 	if (length > BUF_MAX_DATA_SIZE) | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2300 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2301 |  | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2302 | 	event = rb_reserve_next_event(buffer, cpu_buffer, length); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2303 | 	if (!event) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2304 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2305 |  | 
 | 2306 | 	return event; | 
 | 2307 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2308 |  out: | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2309 | 	trace_recursive_unlock(); | 
 | 2310 |  | 
 | 2311 |  out_nocheck: | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2312 | 	preempt_enable_notrace(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2313 | 	return NULL; | 
 | 2314 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2315 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); | 
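/*
 * Illustrative sketch only (not part of the original source): the
 * reserve/commit pairing described in the kernel-doc above, for a
 * hypothetical caller that records a single u64 value. The #if 0 keeps
 * the sketch out of the build.
 */
#if 0
static int example_record_u64(struct ring_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	/* length is the payload size; the event header is added internally */
	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;	/* nothing was reserved, nothing to commit */

	body = ring_buffer_event_data(event);
	*body = val;

	/* every successful reserve must be paired with an unlock_commit */
	return ring_buffer_unlock_commit(buffer, event);
}
#endif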
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2316 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2317 | static void | 
 | 2318 | rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2319 | 		      struct ring_buffer_event *event) | 
 | 2320 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2321 | 	u64 delta; | 
 | 2322 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2323 | 	/* | 
 | 2324 | 	 * The first event in the commit queue updates the | 
 | 2325 | 	 * time stamp. | 
 | 2326 | 	 */ | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2327 | 	if (rb_event_is_commit(cpu_buffer, event)) { | 
 | 2328 | 		/* | 
 | 2329 | 		 * A commit event that is first on a page | 
 | 2330 | 		 * updates the write timestamp with the page stamp | 
 | 2331 | 		 */ | 
 | 2332 | 		if (!rb_event_index(event)) | 
 | 2333 | 			cpu_buffer->write_stamp = | 
 | 2334 | 				cpu_buffer->commit_page->page->time_stamp; | 
 | 2335 | 		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { | 
 | 2336 | 			delta = event->array[0]; | 
 | 2337 | 			delta <<= TS_SHIFT; | 
 | 2338 | 			delta += event->time_delta; | 
 | 2339 | 			cpu_buffer->write_stamp += delta; | 
 | 2340 | 		} else | 
 | 2341 | 			cpu_buffer->write_stamp += event->time_delta; | 
 | 2342 | 	} | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2343 | } | 
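/*
 * Illustrative note (not in the original source): for a TIME_EXTEND event
 * the low bits of the delta live in event->time_delta and the upper bits
 * in event->array[0], so rb_update_write_stamp() above reconstructs the
 * full delta as (array[0] << TS_SHIFT) + time_delta, TS_SHIFT presumably
 * matching the 27-bit time_delta field of the entry header layout.
 */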
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2344 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2345 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 2346 | 		      struct ring_buffer_event *event) | 
 | 2347 | { | 
 | 2348 | 	local_inc(&cpu_buffer->entries); | 
 | 2349 | 	rb_update_write_stamp(cpu_buffer, event); | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2350 | 	rb_end_commit(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2351 | } | 
 | 2352 |  | 
 | 2353 | /** | 
 | 2354 |  * ring_buffer_unlock_commit - commit a reserved event | 
 | 2355 |  * @buffer: The buffer to commit to | 
 | 2356 |  * @event: The event pointer to commit. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2357 |  * | 
 | 2358 |  * This commits the data to the ring buffer, and releases any locks held. | 
 | 2359 |  * | 
 | 2360 |  * Must be paired with ring_buffer_lock_reserve. | 
 | 2361 |  */ | 
 | 2362 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, | 
| Arnaldo Carvalho de Melo | 0a98775 | 2009-02-05 16:12:56 -0200 | [diff] [blame] | 2363 | 			      struct ring_buffer_event *event) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2364 | { | 
 | 2365 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2366 | 	int cpu = raw_smp_processor_id(); | 
 | 2367 |  | 
 | 2368 | 	cpu_buffer = buffer->buffers[cpu]; | 
 | 2369 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2370 | 	rb_commit(cpu_buffer, event); | 
 | 2371 |  | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2372 | 	trace_recursive_unlock(); | 
 | 2373 |  | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2374 | 	preempt_enable_notrace(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2375 |  | 
 | 2376 | 	return 0; | 
 | 2377 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2378 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2379 |  | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2380 | static inline void rb_event_discard(struct ring_buffer_event *event) | 
 | 2381 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2382 | 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) | 
 | 2383 | 		event = skip_time_extend(event); | 
 | 2384 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 2385 | 	/* array[0] holds the actual length for the discarded event */ | 
 | 2386 | 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; | 
 | 2387 | 	event->type_len = RINGBUF_TYPE_PADDING; | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2388 | 	/* time delta must be non-zero */ | 
 | 2389 | 	if (!event->time_delta) | 
 | 2390 | 		event->time_delta = 1; | 
 | 2391 | } | 
 | 2392 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2393 | /* | 
 | 2394 |  * Decrement the entries counter of the page that an event is on. | 
 | 2395 |  * The event does not even need to exist, only the pointer | 
 | 2396 |  * to the page it is on. This may only be called before the commit | 
 | 2397 |  * takes place. | 
 | 2398 |  */ | 
 | 2399 | static inline void | 
 | 2400 | rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 2401 | 		   struct ring_buffer_event *event) | 
 | 2402 | { | 
 | 2403 | 	unsigned long addr = (unsigned long)event; | 
 | 2404 | 	struct buffer_page *bpage = cpu_buffer->commit_page; | 
 | 2405 | 	struct buffer_page *start; | 
 | 2406 |  | 
 | 2407 | 	addr &= PAGE_MASK; | 
 | 2408 |  | 
 | 2409 | 	/* Do the likely case first */ | 
 | 2410 | 	if (likely(bpage->page == (void *)addr)) { | 
 | 2411 | 		local_dec(&bpage->entries); | 
 | 2412 | 		return; | 
 | 2413 | 	} | 
 | 2414 |  | 
 | 2415 | 	/* | 
 | 2416 | 	 * Because the commit page may be on the reader page, we | 
 | 2417 | 	 * start with the next page and check for the end of the loop there. | 
 | 2418 | 	 */ | 
 | 2419 | 	rb_inc_page(cpu_buffer, &bpage); | 
 | 2420 | 	start = bpage; | 
 | 2421 | 	do { | 
 | 2422 | 		if (bpage->page == (void *)addr) { | 
 | 2423 | 			local_dec(&bpage->entries); | 
 | 2424 | 			return; | 
 | 2425 | 		} | 
 | 2426 | 		rb_inc_page(cpu_buffer, &bpage); | 
 | 2427 | 	} while (bpage != start); | 
 | 2428 |  | 
 | 2429 | 	/* commit not part of this buffer?? */ | 
 | 2430 | 	RB_WARN_ON(cpu_buffer, 1); | 
 | 2431 | } | 
 | 2432 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2433 | /** | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2434 |  * ring_buffer_commit_discard - discard an event that has not been committed | 
 | 2435 |  * @buffer: the ring buffer | 
 | 2436 |  * @event: non committed event to discard | 
 | 2437 |  * | 
| Steven Rostedt | dc892f7 | 2009-09-03 15:33:41 -0400 | [diff] [blame] | 2438 |  * Sometimes an event that is in the ring buffer needs to be ignored. | 
 | 2439 |  * This function lets the user discard an event in the ring buffer | 
 | 2440 |  * and then that event will not be read later. | 
 | 2441 |  * | 
 | 2442 |  * This function only works if it is called before the item has been | 
 | 2443 |  * committed. It will try to free the event from the ring buffer | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2444 |  * if another event has not been added behind it. | 
 | 2445 |  * | 
 | 2446 |  * If another event has been added behind it, it will set the event | 
 | 2447 |  * up as discarded, and perform the commit. | 
 | 2448 |  * | 
 | 2449 |  * If this function is called, do not call ring_buffer_unlock_commit on | 
 | 2450 |  * the event. | 
 | 2451 |  */ | 
 | 2452 | void ring_buffer_discard_commit(struct ring_buffer *buffer, | 
 | 2453 | 				struct ring_buffer_event *event) | 
 | 2454 | { | 
 | 2455 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2456 | 	int cpu; | 
 | 2457 |  | 
 | 2458 | 	/* The event is discarded regardless */ | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2459 | 	rb_event_discard(event); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2460 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2461 | 	cpu = smp_processor_id(); | 
 | 2462 | 	cpu_buffer = buffer->buffers[cpu]; | 
 | 2463 |  | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2464 | 	/* | 
 | 2465 | 	 * This must only be called if the event has not been | 
 | 2466 | 	 * committed yet. Thus we can assume that preemption | 
 | 2467 | 	 * is still disabled. | 
 | 2468 | 	 */ | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2469 | 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2470 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2471 | 	rb_decrement_entry(cpu_buffer, event); | 
| Steven Rostedt | 0f2541d | 2009-08-05 12:02:48 -0400 | [diff] [blame] | 2472 | 	if (rb_try_to_discard(cpu_buffer, event)) | 
| Steven Rostedt | edd813b | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2473 | 		goto out; | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2474 |  | 
 | 2475 | 	/* | 
 | 2476 | 	 * The commit is still visible to the reader, so we | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2477 | 	 * must still update the timestamp. | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2478 | 	 */ | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2479 | 	rb_update_write_stamp(cpu_buffer, event); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2480 |  out: | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2481 | 	rb_end_commit(cpu_buffer); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2482 |  | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2483 | 	trace_recursive_unlock(); | 
 | 2484 |  | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2485 | 	preempt_enable_notrace(); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2486 |  | 
 | 2487 | } | 
 | 2488 | EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); | 
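/*
 * Illustrative sketch only (not part of the original source): reserving an
 * event and then discarding it instead of committing, as described in the
 * kernel-doc above. example_filter_rejects() is a hypothetical placeholder
 * for whatever late decision the caller makes.
 */
#if 0
static void example_conditional_record(struct ring_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = val;

	if (example_filter_rejects(val)) {
		/* do NOT call ring_buffer_unlock_commit() after this */
		ring_buffer_discard_commit(buffer, event);
		return;
	}

	ring_buffer_unlock_commit(buffer, event);
}
#endif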
 | 2489 |  | 
 | 2490 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2491 |  * ring_buffer_write - write data to the buffer without reserving | 
 | 2492 |  * @buffer: The ring buffer to write to. | 
 | 2493 |  * @length: The length of the data being written (excluding the event header) | 
 | 2494 |  * @data: The data to write to the buffer. | 
 | 2495 |  * | 
 | 2496 |  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as | 
 | 2497 |  * one function. If you already have the data to write to the buffer, it | 
 | 2498 |  * may be easier to simply call this function. | 
 | 2499 |  * | 
 | 2500 |  * Note, like ring_buffer_lock_reserve, the length is the length of the data | 
 | 2501 |  * and not the length of the event which would hold the header. | 
 | 2502 |  */ | 
 | 2503 | int ring_buffer_write(struct ring_buffer *buffer, | 
 | 2504 | 			unsigned long length, | 
 | 2505 | 			void *data) | 
 | 2506 | { | 
 | 2507 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2508 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2509 | 	void *body; | 
 | 2510 | 	int ret = -EBUSY; | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2511 | 	int cpu; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2512 |  | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 2513 | 	if (ring_buffer_flags != RB_BUFFERS_ON) | 
| Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2514 | 		return -EBUSY; | 
 | 2515 |  | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2516 | 	preempt_disable_notrace(); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2517 |  | 
| Lai Jiangshan | 52fbe9c | 2010-03-08 14:50:43 +0800 | [diff] [blame] | 2518 | 	if (atomic_read(&buffer->record_disabled)) | 
 | 2519 | 		goto out; | 
 | 2520 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2521 | 	cpu = raw_smp_processor_id(); | 
 | 2522 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2523 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2524 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2525 |  | 
 | 2526 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2527 |  | 
 | 2528 | 	if (atomic_read(&cpu_buffer->record_disabled)) | 
 | 2529 | 		goto out; | 
 | 2530 |  | 
| Steven Rostedt | be957c4 | 2009-05-11 14:42:53 -0400 | [diff] [blame] | 2531 | 	if (length > BUF_MAX_DATA_SIZE) | 
 | 2532 | 		goto out; | 
 | 2533 |  | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2534 | 	event = rb_reserve_next_event(buffer, cpu_buffer, length); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2535 | 	if (!event) | 
 | 2536 | 		goto out; | 
 | 2537 |  | 
 | 2538 | 	body = rb_event_data(event); | 
 | 2539 |  | 
 | 2540 | 	memcpy(body, data, length); | 
 | 2541 |  | 
 | 2542 | 	rb_commit(cpu_buffer, event); | 
 | 2543 |  | 
 | 2544 | 	ret = 0; | 
 | 2545 |  out: | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2546 | 	preempt_enable_notrace(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2547 |  | 
 | 2548 | 	return ret; | 
 | 2549 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2550 | EXPORT_SYMBOL_GPL(ring_buffer_write); | 
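/*
 * Illustrative sketch only (not part of the original source): the one-shot
 * write path, for a caller that already has the payload in hand and does
 * not need the separate reserve and commit steps.
 */
#if 0
static int example_write_sample(struct ring_buffer *buffer)
{
	struct { u32 id; u32 value; } sample = { .id = 1, .value = 42 };

	/* length is the payload size only; the event header is added inside */
	return ring_buffer_write(buffer, sizeof(sample), &sample);
}
#endif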
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2551 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 2552 | static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2553 | { | 
 | 2554 | 	struct buffer_page *reader = cpu_buffer->reader_page; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2555 | 	struct buffer_page *head = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2556 | 	struct buffer_page *commit = cpu_buffer->commit_page; | 
 | 2557 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2558 | 	/* In case of error, head will be NULL */ | 
 | 2559 | 	if (unlikely(!head)) | 
 | 2560 | 		return 1; | 
 | 2561 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2562 | 	return reader->read == rb_page_commit(reader) && | 
 | 2563 | 		(commit == reader || | 
 | 2564 | 		 (commit == head && | 
 | 2565 | 		  head->read == rb_page_commit(commit))); | 
 | 2566 | } | 
 | 2567 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2568 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2569 |  * ring_buffer_record_disable - stop all writes into the buffer | 
 | 2570 |  * @buffer: The ring buffer to stop writes to. | 
 | 2571 |  * | 
 | 2572 |  * This prevents all writes to the buffer. Any attempt to write | 
 | 2573 |  * to the buffer after this will fail and return NULL. | 
 | 2574 |  * | 
 | 2575 |  * The caller should call synchronize_sched() after this. | 
 | 2576 |  */ | 
 | 2577 | void ring_buffer_record_disable(struct ring_buffer *buffer) | 
 | 2578 | { | 
 | 2579 | 	atomic_inc(&buffer->record_disabled); | 
 | 2580 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2581 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2582 |  | 
 | 2583 | /** | 
 | 2584 |  * ring_buffer_record_enable - enable writes to the buffer | 
 | 2585 |  * @buffer: The ring buffer to enable writes | 
 | 2586 |  * | 
 | 2587 |  * Note, multiple disables will need the same number of enables | 
| Adam Buchbinder | c41b20e | 2009-12-11 16:35:39 -0500 | [diff] [blame] | 2588 |  * to truly enable the writing (much like preempt_disable). | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2589 |  */ | 
 | 2590 | void ring_buffer_record_enable(struct ring_buffer *buffer) | 
 | 2591 | { | 
 | 2592 | 	atomic_dec(&buffer->record_disabled); | 
 | 2593 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2594 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2595 |  | 
 | 2596 | /** | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 2597 |  * ring_buffer_record_off - stop all writes into the buffer | 
 | 2598 |  * @buffer: The ring buffer to stop writes to. | 
 | 2599 |  * | 
 | 2600 |  * This prevents all writes to the buffer. Any attempt to write | 
 | 2601 |  * to the buffer after this will fail and return NULL. | 
 | 2602 |  * | 
 | 2603 |  * This is different than ring_buffer_record_disable() as | 
 | 2604 |  * This is different from ring_buffer_record_disable() as | 
 | 2605 |  * it works like an on/off switch, whereas the disable() version | 
 | 2606 |  * must be paired with an enable(). | 
 | 2607 | void ring_buffer_record_off(struct ring_buffer *buffer) | 
 | 2608 | { | 
 | 2609 | 	unsigned int rd; | 
 | 2610 | 	unsigned int new_rd; | 
 | 2611 |  | 
 | 2612 | 	do { | 
 | 2613 | 		rd = atomic_read(&buffer->record_disabled); | 
 | 2614 | 		new_rd = rd | RB_BUFFER_OFF; | 
 | 2615 | 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); | 
 | 2616 | } | 
 | 2617 | EXPORT_SYMBOL_GPL(ring_buffer_record_off); | 
 | 2618 |  | 
 | 2619 | /** | 
 | 2620 |  * ring_buffer_record_on - restart writes into the buffer | 
 | 2621 |  * @buffer: The ring buffer to start writes to. | 
 | 2622 |  * | 
 | 2623 |  * This enables all writes to the buffer that was disabled by | 
 | 2624 |  * ring_buffer_record_off(). | 
 | 2625 |  * | 
 | 2626 |  * This is different from ring_buffer_record_enable() as | 
 | 2627 |  * it works like an on/off switch, whereas the enable() version | 
 | 2628 |  * must be paired with a disable(). | 
 | 2629 |  */ | 
 | 2630 | void ring_buffer_record_on(struct ring_buffer *buffer) | 
 | 2631 | { | 
 | 2632 | 	unsigned int rd; | 
 | 2633 | 	unsigned int new_rd; | 
 | 2634 |  | 
 | 2635 | 	do { | 
 | 2636 | 		rd = atomic_read(&buffer->record_disabled); | 
 | 2637 | 		new_rd = rd & ~RB_BUFFER_OFF; | 
 | 2638 | 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); | 
 | 2639 | } | 
 | 2640 | EXPORT_SYMBOL_GPL(ring_buffer_record_on); | 
 | 2641 |  | 
 | 2642 | /** | 
 | 2643 |  * ring_buffer_record_is_on - return true if the ring buffer can write | 
 | 2644 |  * @buffer: The ring buffer to see if write is enabled | 
 | 2645 |  * | 
 | 2646 |  * Returns true if the ring buffer is in a state that it accepts writes. | 
 | 2647 |  */ | 
 | 2648 | int ring_buffer_record_is_on(struct ring_buffer *buffer) | 
 | 2649 | { | 
 | 2650 | 	return !atomic_read(&buffer->record_disabled); | 
 | 2651 | } | 
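/*
 * Illustrative sketch only (not part of the original source): the on/off
 * switch usage. Unlike the disable()/enable() pair, record_off() does not
 * need to be balanced with a matching call count.
 */
#if 0
static void example_freeze_buffer(struct ring_buffer *buffer)
{
	ring_buffer_record_off(buffer);

	if (!ring_buffer_record_is_on(buffer))
		pr_info("ring buffer writes are now rejected\n");

	/* ... inspect or dump the frozen buffer contents here ... */

	ring_buffer_record_on(buffer);
}
#endif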
 | 2652 |  | 
 | 2653 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2654 |  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer | 
 | 2655 |  * @buffer: The ring buffer to stop writes to. | 
 | 2656 |  * @cpu: The CPU buffer to stop | 
 | 2657 |  * | 
 | 2658 |  * This prevents all writes to the buffer. Any attempt to write | 
 | 2659 |  * to the buffer after this will fail and return NULL. | 
 | 2660 |  * | 
 | 2661 |  * The caller should call synchronize_sched() after this. | 
 | 2662 |  */ | 
 | 2663 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | 
 | 2664 | { | 
 | 2665 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2666 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2667 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 2668 | 		return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2669 |  | 
 | 2670 | 	cpu_buffer = buffer->buffers[cpu]; | 
 | 2671 | 	atomic_inc(&cpu_buffer->record_disabled); | 
 | 2672 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2673 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2674 |  | 
 | 2675 | /** | 
 | 2676 |  * ring_buffer_record_enable_cpu - enable writes to the buffer | 
 | 2677 |  * @buffer: The ring buffer to enable writes | 
 | 2678 |  * @cpu: The CPU to enable. | 
 | 2679 |  * | 
 | 2680 |  * Note, multiple disables will need the same number of enables | 
| Adam Buchbinder | c41b20e | 2009-12-11 16:35:39 -0500 | [diff] [blame] | 2681 |  * to truly enable the writing (much like preempt_disable). | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2682 |  */ | 
 | 2683 | void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | 
 | 2684 | { | 
 | 2685 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2686 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2687 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 2688 | 		return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2689 |  | 
 | 2690 | 	cpu_buffer = buffer->buffers[cpu]; | 
 | 2691 | 	atomic_dec(&cpu_buffer->record_disabled); | 
 | 2692 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2693 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2694 |  | 
| Steven Rostedt | f6195aa | 2010-09-01 12:23:12 -0400 | [diff] [blame] | 2695 | /* | 
 | 2696 |  * The total number of entries in the ring buffer is the running counter | 
 | 2697 |  * of entries entered into the ring buffer, minus the sum of | 
 | 2698 |  * the entries read from the ring buffer and the number of | 
 | 2699 |  * entries that were overwritten. | 
 | 2700 |  */ | 
 | 2701 | static inline unsigned long | 
 | 2702 | rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) | 
 | 2703 | { | 
 | 2704 | 	return local_read(&cpu_buffer->entries) - | 
 | 2705 | 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read); | 
 | 2706 | } | 
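/*
 * Illustrative note (not in the original source): for example, if 100
 * events have been written, 10 were overwritten when the writer wrapped
 * and 20 have already been read, the buffer still holds
 * 100 - (10 + 20) = 70 entries.
 */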
 | 2707 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2708 | /** | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2709 |  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer | 
 | 2710 |  * @buffer: The ring buffer | 
 | 2711 |  * @cpu: The per CPU buffer to read from. | 
 | 2712 |  */ | 
 | 2713 | unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) | 
 | 2714 | { | 
 | 2715 | 	unsigned long flags; | 
 | 2716 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2717 | 	struct buffer_page *bpage; | 
| Steven Rostedt | 4d7981b | 2012-11-29 22:31:16 -0500 | [diff] [blame] | 2718 | 	unsigned long ret = 0; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2719 |  | 
 | 2720 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
 | 2721 | 		return 0; | 
 | 2722 |  | 
 | 2723 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Linus Torvalds | 7115e3f | 2011-10-26 17:03:38 +0200 | [diff] [blame] | 2724 | 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2725 | 	/* | 
 | 2726 | 	 * If the tail is on the reader_page, the oldest time stamp is on the | 
 | 2727 | 	 * reader page. | 
 | 2728 | 	 */ | 
 | 2729 | 	if (cpu_buffer->tail_page == cpu_buffer->reader_page) | 
 | 2730 | 		bpage = cpu_buffer->reader_page; | 
 | 2731 | 	else | 
 | 2732 | 		bpage = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | 4d7981b | 2012-11-29 22:31:16 -0500 | [diff] [blame] | 2733 | 	if (bpage) | 
 | 2734 | 		ret = bpage->page->time_stamp; | 
| Linus Torvalds | 7115e3f | 2011-10-26 17:03:38 +0200 | [diff] [blame] | 2735 | 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2736 |  | 
 | 2737 | 	return ret; | 
 | 2738 | } | 
 | 2739 | EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); | 
 | 2740 |  | 
 | 2741 | /** | 
 | 2742 |  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer | 
 | 2743 |  * @buffer: The ring buffer | 
 | 2744 |  * @cpu: The per CPU buffer to read from. | 
 | 2745 |  */ | 
 | 2746 | unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) | 
 | 2747 | { | 
 | 2748 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2749 | 	unsigned long ret; | 
 | 2750 |  | 
 | 2751 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
 | 2752 | 		return 0; | 
 | 2753 |  | 
 | 2754 | 	cpu_buffer = buffer->buffers[cpu]; | 
 | 2755 | 	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; | 
 | 2756 |  | 
 | 2757 | 	return ret; | 
 | 2758 | } | 
 | 2759 | EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); | 
 | 2760 |  | 
 | 2761 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2762 |  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer | 
 | 2763 |  * @buffer: The ring buffer | 
 | 2764 |  * @cpu: The per CPU buffer to get the entries from. | 
 | 2765 |  */ | 
 | 2766 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | 
 | 2767 | { | 
 | 2768 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2769 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2770 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 2771 | 		return 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2772 |  | 
 | 2773 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 2774 |  | 
| Steven Rostedt | f6195aa | 2010-09-01 12:23:12 -0400 | [diff] [blame] | 2775 | 	return rb_num_of_entries(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2776 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2777 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2778 |  | 
 | 2779 | /** | 
 | 2780 |  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer | 
 | 2781 |  * @buffer: The ring buffer | 
 | 2782 |  * @cpu: The per CPU buffer to get the number of overruns from | 
 | 2783 |  */ | 
 | 2784 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) | 
 | 2785 | { | 
 | 2786 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 2787 | 	unsigned long ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2788 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2789 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 2790 | 		return 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2791 |  | 
 | 2792 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2793 | 	ret = local_read(&cpu_buffer->overrun); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 2794 |  | 
 | 2795 | 	return ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2796 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2797 | EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2798 |  | 
 | 2799 | /** | 
| Steven Rostedt | f0d2c68 | 2009-04-29 13:43:37 -0400 | [diff] [blame] | 2800 |  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits | 
 | 2801 |  * @buffer: The ring buffer | 
 | 2802 |  * @cpu: The per CPU buffer to get the number of overruns from | 
 | 2803 |  */ | 
 | 2804 | unsigned long | 
 | 2805 | ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) | 
 | 2806 | { | 
 | 2807 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2808 | 	unsigned long ret; | 
 | 2809 |  | 
 | 2810 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
 | 2811 | 		return 0; | 
 | 2812 |  | 
 | 2813 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2814 | 	ret = local_read(&cpu_buffer->commit_overrun); | 
| Steven Rostedt | f0d2c68 | 2009-04-29 13:43:37 -0400 | [diff] [blame] | 2815 |  | 
 | 2816 | 	return ret; | 
 | 2817 | } | 
 | 2818 | EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); | 
 | 2819 |  | 
 | 2820 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2821 |  * ring_buffer_entries - get the number of entries in a buffer | 
 | 2822 |  * @buffer: The ring buffer | 
 | 2823 |  * | 
 | 2824 |  * Returns the total number of entries in the ring buffer | 
 | 2825 |  * (all CPU entries) | 
 | 2826 |  */ | 
 | 2827 | unsigned long ring_buffer_entries(struct ring_buffer *buffer) | 
 | 2828 | { | 
 | 2829 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2830 | 	unsigned long entries = 0; | 
 | 2831 | 	int cpu; | 
 | 2832 |  | 
 | 2833 | 	/* if you care about this being correct, lock the buffer */ | 
 | 2834 | 	for_each_buffer_cpu(buffer, cpu) { | 
 | 2835 | 		cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | f6195aa | 2010-09-01 12:23:12 -0400 | [diff] [blame] | 2836 | 		entries += rb_num_of_entries(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2837 | 	} | 
 | 2838 |  | 
 | 2839 | 	return entries; | 
 | 2840 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2841 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2842 |  | 
 | 2843 | /** | 
| Jiri Olsa | 67b394f | 2009-10-23 19:36:18 -0400 | [diff] [blame] | 2844 |  * ring_buffer_overruns - get the number of overruns in buffer | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2845 |  * @buffer: The ring buffer | 
 | 2846 |  * | 
 | 2847 |  * Returns the total number of overruns in the ring buffer | 
 | 2848 |  * (all CPU entries) | 
 | 2849 |  */ | 
 | 2850 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | 
 | 2851 | { | 
 | 2852 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2853 | 	unsigned long overruns = 0; | 
 | 2854 | 	int cpu; | 
 | 2855 |  | 
 | 2856 | 	/* if you care about this being correct, lock the buffer */ | 
 | 2857 | 	for_each_buffer_cpu(buffer, cpu) { | 
 | 2858 | 		cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2859 | 		overruns += local_read(&cpu_buffer->overrun); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2860 | 	} | 
 | 2861 |  | 
 | 2862 | 	return overruns; | 
 | 2863 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2864 | EXPORT_SYMBOL_GPL(ring_buffer_overruns); | 
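/*
 * Illustrative sketch only (not part of the original source): reading the
 * aggregate statistics exported above. The pr_info() report is just a
 * placeholder for whatever the caller does with the numbers.
 */
#if 0
static void example_report_stats(struct ring_buffer *buffer)
{
	unsigned long entries = ring_buffer_entries(buffer);
	unsigned long overruns = ring_buffer_overruns(buffer);

	pr_info("ring buffer: %lu entries, %lu overruns\n",
		entries, overruns);
}
#endif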
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2865 |  | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 2866 | static void rb_iter_reset(struct ring_buffer_iter *iter) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2867 | { | 
 | 2868 | 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
 | 2869 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2870 | 	/* Iterator usage is expected to have recording disabled */ | 
 | 2871 | 	if (list_empty(&cpu_buffer->reader_page->list)) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2872 | 		iter->head_page = rb_set_head_page(cpu_buffer); | 
 | 2873 | 		if (unlikely(!iter->head_page)) | 
 | 2874 | 			return; | 
 | 2875 | 		iter->head = iter->head_page->read; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2876 | 	} else { | 
 | 2877 | 		iter->head_page = cpu_buffer->reader_page; | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 2878 | 		iter->head = cpu_buffer->reader_page->read; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2879 | 	} | 
 | 2880 | 	if (iter->head) | 
 | 2881 | 		iter->read_stamp = cpu_buffer->read_stamp; | 
 | 2882 | 	else | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 2883 | 		iter->read_stamp = iter->head_page->page->time_stamp; | 
| Steven Rostedt | 492a74f | 2010-01-25 15:17:47 -0500 | [diff] [blame] | 2884 | 	iter->cache_reader_page = cpu_buffer->reader_page; | 
 | 2885 | 	iter->cache_read = cpu_buffer->read; | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 2886 | } | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2887 |  | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 2888 | /** | 
 | 2889 |  * ring_buffer_iter_reset - reset an iterator | 
 | 2890 |  * @iter: The iterator to reset | 
 | 2891 |  * | 
 | 2892 |  * Resets the iterator, so that it will start from the beginning | 
 | 2893 |  * again. | 
 | 2894 |  */ | 
 | 2895 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | 
 | 2896 | { | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 2897 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 2898 | 	unsigned long flags; | 
 | 2899 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 2900 | 	if (!iter) | 
 | 2901 | 		return; | 
 | 2902 |  | 
 | 2903 | 	cpu_buffer = iter->cpu_buffer; | 
 | 2904 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 2905 | 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 2906 | 	rb_iter_reset(iter); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 2907 | 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2908 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2909 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2910 |  | 
 | 2911 | /** | 
 | 2912 |  * ring_buffer_iter_empty - check if an iterator has no more to read | 
 | 2913 |  * @iter: The iterator to check | 
 | 2914 |  */ | 
 | 2915 | int ring_buffer_iter_empty(struct ring_buffer_iter *iter) | 
 | 2916 | { | 
 | 2917 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 2918 |  | 
 | 2919 | 	cpu_buffer = iter->cpu_buffer; | 
 | 2920 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2921 | 	return iter->head_page == cpu_buffer->commit_page && | 
 | 2922 | 		iter->head == rb_commit_index(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2923 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2924 | EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2925 |  | 
 | 2926 | static void | 
 | 2927 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 
 | 2928 | 		     struct ring_buffer_event *event) | 
 | 2929 | { | 
 | 2930 | 	u64 delta; | 
 | 2931 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 2932 | 	switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2933 | 	case RINGBUF_TYPE_PADDING: | 
 | 2934 | 		return; | 
 | 2935 |  | 
 | 2936 | 	case RINGBUF_TYPE_TIME_EXTEND: | 
 | 2937 | 		delta = event->array[0]; | 
 | 2938 | 		delta <<= TS_SHIFT; | 
 | 2939 | 		delta += event->time_delta; | 
 | 2940 | 		cpu_buffer->read_stamp += delta; | 
 | 2941 | 		return; | 
 | 2942 |  | 
 | 2943 | 	case RINGBUF_TYPE_TIME_STAMP: | 
 | 2944 | 		/* FIXME: not implemented */ | 
 | 2945 | 		return; | 
 | 2946 |  | 
 | 2947 | 	case RINGBUF_TYPE_DATA: | 
 | 2948 | 		cpu_buffer->read_stamp += event->time_delta; | 
 | 2949 | 		return; | 
 | 2950 |  | 
 | 2951 | 	default: | 
 | 2952 | 		BUG(); | 
 | 2953 | 	} | 
 | 2954 | 	return; | 
 | 2955 | } | 
 | 2956 |  | 
 | 2957 | static void | 
 | 2958 | rb_update_iter_read_stamp(struct ring_buffer_iter *iter, | 
 | 2959 | 			  struct ring_buffer_event *event) | 
 | 2960 | { | 
 | 2961 | 	u64 delta; | 
 | 2962 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 2963 | 	switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2964 | 	case RINGBUF_TYPE_PADDING: | 
 | 2965 | 		return; | 
 | 2966 |  | 
 | 2967 | 	case RINGBUF_TYPE_TIME_EXTEND: | 
 | 2968 | 		delta = event->array[0]; | 
 | 2969 | 		delta <<= TS_SHIFT; | 
 | 2970 | 		delta += event->time_delta; | 
 | 2971 | 		iter->read_stamp += delta; | 
 | 2972 | 		return; | 
 | 2973 |  | 
 | 2974 | 	case RINGBUF_TYPE_TIME_STAMP: | 
 | 2975 | 		/* FIXME: not implemented */ | 
 | 2976 | 		return; | 
 | 2977 |  | 
 | 2978 | 	case RINGBUF_TYPE_DATA: | 
 | 2979 | 		iter->read_stamp += event->time_delta; | 
 | 2980 | 		return; | 
 | 2981 |  | 
 | 2982 | 	default: | 
 | 2983 | 		BUG(); | 
 | 2984 | 	} | 
 | 2985 | 	return; | 
 | 2986 | } | 
 | 2987 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2988 | static struct buffer_page * | 
 | 2989 | rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2990 | { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2991 | 	struct buffer_page *reader = NULL; | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 2992 | 	unsigned long overwrite; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2993 | 	unsigned long flags; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 2994 | 	int nr_loops = 0; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2995 | 	int ret; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2996 |  | 
| Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 2997 | 	local_irq_save(flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 2998 | 	arch_spin_lock(&cpu_buffer->lock); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2999 |  | 
 | 3000 |  again: | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3001 | 	/* | 
 | 3002 | 	 * This should normally only loop twice. But because the | 
 | 3003 | 	 * start of the reader inserts an empty page, it causes | 
 | 3004 | 	 * a case where we will loop three times. There should be no | 
 | 3005 | 	 * reason to loop four times (that I know of). | 
 | 3006 | 	 */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3007 | 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3008 | 		reader = NULL; | 
 | 3009 | 		goto out; | 
 | 3010 | 	} | 
 | 3011 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3012 | 	reader = cpu_buffer->reader_page; | 
 | 3013 |  | 
 | 3014 | 	/* If there's more to read, return this page */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3015 | 	if (cpu_buffer->reader_page->read < rb_page_size(reader)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3016 | 		goto out; | 
 | 3017 |  | 
 | 3018 | 	/* Never should we have an index greater than the size */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3019 | 	if (RB_WARN_ON(cpu_buffer, | 
 | 3020 | 		       cpu_buffer->reader_page->read > rb_page_size(reader))) | 
 | 3021 | 		goto out; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3022 |  | 
 | 3023 | 	/* check if we caught up to the tail */ | 
 | 3024 | 	reader = NULL; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3025 | 	if (cpu_buffer->commit_page == cpu_buffer->reader_page) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3026 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3027 |  | 
 | 3028 | 	/* | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3029 | 	 * Reset the reader page to size zero. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3030 | 	 */ | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3031 | 	local_set(&cpu_buffer->reader_page->write, 0); | 
 | 3032 | 	local_set(&cpu_buffer->reader_page->entries, 0); | 
 | 3033 | 	local_set(&cpu_buffer->reader_page->page->commit, 0); | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 3034 | 	cpu_buffer->reader_page->real_end = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3035 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3036 |  spin: | 
 | 3037 | 	/* | 
 | 3038 | 	 * Splice the empty reader page into the list around the head. | 
 | 3039 | 	 */ | 
 | 3040 | 	reader = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | 4d7981b | 2012-11-29 22:31:16 -0500 | [diff] [blame] | 3041 | 	if (!reader) | 
 | 3042 | 		goto out; | 
| Steven Rostedt | 0e1ff5d | 2010-01-06 20:40:44 -0500 | [diff] [blame] | 3043 | 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3044 | 	cpu_buffer->reader_page->list.prev = reader->list.prev; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3045 |  | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 3046 | 	/* | 
 | 3047 | 	 * cpu_buffer->pages just needs to point to the buffer, it | 
 | 3048 | 	 *  has no specific buffer page to point to. Let's move it out | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3049 | 	 *  of our way so we don't accidentally swap it. | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 3050 | 	 */ | 
 | 3051 | 	cpu_buffer->pages = reader->list.prev; | 
 | 3052 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3053 | 	/* The reader page will be pointing to the new head */ | 
 | 3054 | 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3055 |  | 
 | 3056 | 	/* | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3057 | 	 * We want to make sure we read the overruns after we set up our | 
 | 3058 | 	 * pointers to the next object. The writer side does a | 
 | 3059 | 	 * cmpxchg to cross pages which acts as the mb on the writer | 
 | 3060 | 	 * side. Note, the reader will constantly fail the swap | 
 | 3061 | 	 * while the writer is updating the pointers, so this | 
 | 3062 | 	 * guarantees that the overwrite recorded here is the one we | 
 | 3063 | 	 * want to compare with the last_overrun. | 
 | 3064 | 	 */ | 
 | 3065 | 	smp_mb(); | 
 | 3066 | 	overwrite = local_read(&(cpu_buffer->overrun)); | 
 | 3067 |  | 
 | 3068 | 	/* | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3069 | 	 * Here's the tricky part. | 
 | 3070 | 	 * | 
 | 3071 | 	 * We need to move the pointer past the header page. | 
 | 3072 | 	 * But we can only do that if a writer is not currently | 
 | 3073 | 	 * moving it. The page before the header page has the | 
 | 3074 | 	 * flag bit '1' set if it is pointing to the page we want, | 
 | 3075 | 	 * but if the writer is in the process of moving it | 
 | 3076 | 	 * then it will be '2', or '0' if it has already moved. | 
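 |  | 	 * (Assuming the RB_PAGE_* flag definitions earlier in this file, | 
 |  | 	 *  these values correspond to RB_PAGE_HEAD, RB_PAGE_UPDATE and | 
 |  | 	 *  RB_PAGE_NORMAL respectively.) | 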
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3077 | 	 */ | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3078 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3079 | 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page); | 
 | 3080 |  | 
 | 3081 | 	/* | 
 | 3082 | 	 * If we did not convert it, then we must try again. | 
 | 3083 | 	 */ | 
 | 3084 | 	if (!ret) | 
 | 3085 | 		goto spin; | 
 | 3086 |  | 
 | 3087 | 	/* | 
 | 3088 | 	 * Yeah! We succeeded in replacing the page. | 
 | 3089 | 	 * | 
 | 3090 | 	 * Now make the new head point back to the reader page. | 
 | 3091 | 	 */ | 
| David Sharp | 5ded3dc | 2010-01-06 17:12:07 -0800 | [diff] [blame] | 3092 | 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3093 | 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3094 |  | 
 | 3095 | 	/* Finally update the reader page to the new head */ | 
 | 3096 | 	cpu_buffer->reader_page = reader; | 
 | 3097 | 	rb_reset_reader_page(cpu_buffer); | 
 | 3098 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3099 | 	if (overwrite != cpu_buffer->last_overrun) { | 
 | 3100 | 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; | 
 | 3101 | 		cpu_buffer->last_overrun = overwrite; | 
 | 3102 | 	} | 
 | 3103 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3104 | 	goto again; | 
 | 3105 |  | 
 | 3106 |  out: | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3107 | 	arch_spin_unlock(&cpu_buffer->lock); | 
| Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 3108 | 	local_irq_restore(flags); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3109 |  | 
 | 3110 | 	return reader; | 
 | 3111 | } | 
 | 3112 |  | 
 | 3113 | static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | 
 | 3114 | { | 
 | 3115 | 	struct ring_buffer_event *event; | 
 | 3116 | 	struct buffer_page *reader; | 
 | 3117 | 	unsigned length; | 
 | 3118 |  | 
 | 3119 | 	reader = rb_get_reader_page(cpu_buffer); | 
 | 3120 |  | 
 | 3121 | 	/* This function should not be called when buffer is empty */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3122 | 	if (RB_WARN_ON(cpu_buffer, !reader)) | 
 | 3123 | 		return; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3124 |  | 
 | 3125 | 	event = rb_reader_event(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3126 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 3127 | 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 
| Steven Rostedt | e4906ef | 2009-04-30 20:49:44 -0400 | [diff] [blame] | 3128 | 		cpu_buffer->read++; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3129 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3130 | 	rb_update_read_stamp(cpu_buffer, event); | 
 | 3131 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3132 | 	length = rb_event_length(event); | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 3133 | 	cpu_buffer->reader_page->read += length; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3134 | } | 
 | 3135 |  | 
 | 3136 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 
 | 3137 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3138 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 3139 | 	struct ring_buffer_event *event; | 
 | 3140 | 	unsigned length; | 
 | 3141 |  | 
 | 3142 | 	cpu_buffer = iter->cpu_buffer; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3143 |  | 
 | 3144 | 	/* | 
 | 3145 | 	 * Check if we are at the end of the buffer. | 
 | 3146 | 	 */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3147 | 	if (iter->head >= rb_page_size(iter->head_page)) { | 
| Steven Rostedt | ea05b57 | 2009-06-03 09:30:10 -0400 | [diff] [blame] | 3148 | 		/* discarded commits can make the page empty */ | 
 | 3149 | 		if (iter->head_page == cpu_buffer->commit_page) | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3150 | 			return; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3151 | 		rb_inc_iter(iter); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3152 | 		return; | 
 | 3153 | 	} | 
 | 3154 |  | 
 | 3155 | 	event = rb_iter_head_event(iter); | 
 | 3156 |  | 
 | 3157 | 	length = rb_event_length(event); | 
 | 3158 |  | 
 | 3159 | 	/* | 
 | 3160 | 	 * This should not be called to advance the header if we are | 
 | 3161 | 	 * at the tail of the buffer. | 
 | 3162 | 	 */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3163 | 	if (RB_WARN_ON(cpu_buffer, | 
| Steven Rostedt | f536aaf | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 3164 | 		       (iter->head_page == cpu_buffer->commit_page) && | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3165 | 		       (iter->head + length > rb_commit_index(cpu_buffer)))) | 
 | 3166 | 		return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3167 |  | 
 | 3168 | 	rb_update_iter_read_stamp(iter, event); | 
 | 3169 |  | 
 | 3170 | 	iter->head += length; | 
 | 3171 |  | 
 | 3172 | 	/* check for end of page padding */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3173 | 	if ((iter->head >= rb_page_size(iter->head_page)) && | 
 | 3174 | 	    (iter->head_page != cpu_buffer->commit_page)) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3175 | 		rb_advance_iter(iter); | 
 | 3176 | } | 
 | 3177 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3178 | static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) | 
 | 3179 | { | 
 | 3180 | 	return cpu_buffer->lost_events; | 
 | 3181 | } | 
 | 3182 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3183 | static struct ring_buffer_event * | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3184 | rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, | 
 | 3185 | 	       unsigned long *lost_events) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3186 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3187 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3188 | 	struct buffer_page *reader; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3189 | 	int nr_loops = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3190 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3191 |  again: | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3192 | 	/* | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3193 | 	 * We repeat when a time extend is encountered. | 
 | 3194 | 	 * Since the time extend is always attached to a data event, | 
 | 3195 | 	 * we should never loop more than once. | 
 | 3196 | 	 * (We never hit the following condition more than twice). | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3197 | 	 */ | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3198 | 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3199 | 		return NULL; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3200 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3201 | 	reader = rb_get_reader_page(cpu_buffer); | 
 | 3202 | 	if (!reader) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3203 | 		return NULL; | 
 | 3204 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3205 | 	event = rb_reader_event(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3206 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 3207 | 	switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3208 | 	case RINGBUF_TYPE_PADDING: | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3209 | 		if (rb_null_event(event)) | 
 | 3210 | 			RB_WARN_ON(cpu_buffer, 1); | 
 | 3211 | 		/* | 
 | 3212 | 		 * Because the writer could be discarding every | 
 | 3213 | 		 * event it creates (which would probably be bad) | 
 | 3214 | 		 * if we were to go back to "again" then we may never | 
 | 3215 | 		 * catch up, and will trigger the warn on, or lock | 
 | 3216 | 		 * the box. Return the padding, and we will release | 
 | 3217 | 		 * the current locks, and try again. | 
 | 3218 | 		 */ | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3219 | 		return event; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3220 |  | 
 | 3221 | 	case RINGBUF_TYPE_TIME_EXTEND: | 
 | 3222 | 		/* Internal data, OK to advance */ | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3223 | 		rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3224 | 		goto again; | 
 | 3225 |  | 
 | 3226 | 	case RINGBUF_TYPE_TIME_STAMP: | 
 | 3227 | 		/* FIXME: not implemented */ | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3228 | 		rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3229 | 		goto again; | 
 | 3230 |  | 
 | 3231 | 	case RINGBUF_TYPE_DATA: | 
 | 3232 | 		if (ts) { | 
 | 3233 | 			*ts = cpu_buffer->read_stamp + event->time_delta; | 
| Robert Richter | d8eeb2d | 2009-07-31 14:58:04 +0200 | [diff] [blame] | 3234 | 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer, | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 3235 | 							 cpu_buffer->cpu, ts); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3236 | 		} | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3237 | 		if (lost_events) | 
 | 3238 | 			*lost_events = rb_lost_events(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3239 | 		return event; | 
 | 3240 |  | 
 | 3241 | 	default: | 
 | 3242 | 		BUG(); | 
 | 3243 | 	} | 
 | 3244 |  | 
 | 3245 | 	return NULL; | 
 | 3246 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3247 | EXPORT_SYMBOL_GPL(ring_buffer_peek); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3248 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3249 | static struct ring_buffer_event * | 
 | 3250 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3251 | { | 
 | 3252 | 	struct ring_buffer *buffer; | 
 | 3253 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 3254 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3255 | 	int nr_loops = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3256 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3257 | 	cpu_buffer = iter->cpu_buffer; | 
 | 3258 | 	buffer = cpu_buffer->buffer; | 
 | 3259 |  | 
| Steven Rostedt | 492a74f | 2010-01-25 15:17:47 -0500 | [diff] [blame] | 3260 | 	/* | 
 | 3261 | 	 * Check if someone performed a consuming read to | 
 | 3262 | 	 * the buffer. A consuming read invalidates the iterator | 
 | 3263 | 	 * and we need to reset the iterator in this case. | 
 | 3264 | 	 */ | 
 | 3265 | 	if (unlikely(iter->cache_read != cpu_buffer->read || | 
 | 3266 | 		     iter->cache_reader_page != cpu_buffer->reader_page)) | 
 | 3267 | 		rb_iter_reset(iter); | 
 | 3268 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3269 |  again: | 
| Steven Rostedt | 3c05d74 | 2010-01-26 16:14:08 -0500 | [diff] [blame] | 3270 | 	if (ring_buffer_iter_empty(iter)) | 
 | 3271 | 		return NULL; | 
 | 3272 |  | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3273 | 	/* | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3274 | 	 * We repeat when a time extend is encountered. | 
 | 3275 | 	 * Since the time extend is always attached to a data event, | 
 | 3276 | 	 * we should never loop more than once. | 
 | 3277 | 	 * (We never hit the following condition more than twice). | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3278 | 	 */ | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3279 | 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3280 | 		return NULL; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3281 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3282 | 	if (rb_per_cpu_empty(cpu_buffer)) | 
 | 3283 | 		return NULL; | 
 | 3284 |  | 
| Steven Rostedt | 3c05d74 | 2010-01-26 16:14:08 -0500 | [diff] [blame] | 3285 | 	if (iter->head >= local_read(&iter->head_page->page->commit)) { | 
 | 3286 | 		rb_inc_iter(iter); | 
 | 3287 | 		goto again; | 
 | 3288 | 	} | 
 | 3289 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3290 | 	event = rb_iter_head_event(iter); | 
 | 3291 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 3292 | 	switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3293 | 	case RINGBUF_TYPE_PADDING: | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3294 | 		if (rb_null_event(event)) { | 
 | 3295 | 			rb_inc_iter(iter); | 
 | 3296 | 			goto again; | 
 | 3297 | 		} | 
 | 3298 | 		rb_advance_iter(iter); | 
 | 3299 | 		return event; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3300 |  | 
 | 3301 | 	case RINGBUF_TYPE_TIME_EXTEND: | 
 | 3302 | 		/* Internal data, OK to advance */ | 
 | 3303 | 		rb_advance_iter(iter); | 
 | 3304 | 		goto again; | 
 | 3305 |  | 
 | 3306 | 	case RINGBUF_TYPE_TIME_STAMP: | 
 | 3307 | 		/* FIXME: not implemented */ | 
 | 3308 | 		rb_advance_iter(iter); | 
 | 3309 | 		goto again; | 
 | 3310 |  | 
 | 3311 | 	case RINGBUF_TYPE_DATA: | 
 | 3312 | 		if (ts) { | 
 | 3313 | 			*ts = iter->read_stamp + event->time_delta; | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 3314 | 			ring_buffer_normalize_time_stamp(buffer, | 
 | 3315 | 							 cpu_buffer->cpu, ts); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3316 | 		} | 
 | 3317 | 		return event; | 
 | 3318 |  | 
 | 3319 | 	default: | 
 | 3320 | 		BUG(); | 
 | 3321 | 	} | 
 | 3322 |  | 
 | 3323 | 	return NULL; | 
 | 3324 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3325 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3326 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3327 | static inline int rb_ok_to_lock(void) | 
 | 3328 | { | 
 | 3329 | 	/* | 
 | 3330 | 	 * If an NMI die dump is reading out the contents of the | 
 | 3331 | 	 * ring buffer, do not grab locks. We also permanently | 
 | 3332 | 	 * disable the ring buffer. A one-time deal is all you get | 
 | 3333 | 	 * from reading the ring buffer from an NMI. | 
 | 3334 | 	 */ | 
| Steven Rostedt | 464e85e | 2009-08-05 15:26:37 -0400 | [diff] [blame] | 3335 | 	if (likely(!in_nmi())) | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3336 | 		return 1; | 
 | 3337 |  | 
 | 3338 | 	tracing_off_permanent(); | 
 | 3339 | 	return 0; | 
 | 3340 | } | 
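 |  | /* | 
 |  |  * Callers of rb_ok_to_lock() pair the result with local_irq_save() and, | 
 |  |  * only when dolock is set, the per cpu reader_lock -- see the pattern | 
 |  |  * in ring_buffer_peek() and ring_buffer_consume() below. | 
 |  |  */ | 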
 | 3341 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3342 | /** | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3343 |  * ring_buffer_peek - peek at the next event to be read | 
 | 3344 |  * @buffer: The ring buffer to read | 
 | 3345 |  * @cpu: The cpu to peek at | 
 | 3346 |  * @ts: The timestamp counter of this event. | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3347 |  * @lost_events: a variable to store if events were lost (may be NULL) | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3348 |  * | 
 | 3349 |  * This will return the event that will be read next, but does | 
 | 3350 |  * not consume the data. | 
 | 3351 |  */ | 
 | 3352 | struct ring_buffer_event * | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3353 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, | 
 | 3354 | 		 unsigned long *lost_events) | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3355 | { | 
 | 3356 | 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3357 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3358 | 	unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3359 | 	int dolock; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3360 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3361 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3362 | 		return NULL; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3363 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3364 | 	dolock = rb_ok_to_lock(); | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3365 |  again: | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3366 | 	local_irq_save(flags); | 
 | 3367 | 	if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3368 | 		raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3369 | 	event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 
| Robert Richter | 469535a | 2009-07-30 19:19:18 +0200 | [diff] [blame] | 3370 | 	if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
 | 3371 | 		rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3372 | 	if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3373 | 		raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3374 | 	local_irq_restore(flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3375 |  | 
| Steven Rostedt | 1b959e1 | 2009-09-03 10:12:13 -0400 | [diff] [blame] | 3376 | 	if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3377 | 		goto again; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3378 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3379 | 	return event; | 
 | 3380 | } | 
 | 3381 |  | 
 | 3382 | /** | 
 | 3383 |  * ring_buffer_iter_peek - peek at the next event to be read | 
 | 3384 |  * @iter: The ring buffer iterator | 
 | 3385 |  * @ts: The timestamp counter of this event. | 
 | 3386 |  * | 
 | 3387 |  * This will return the event that will be read next, but does | 
 | 3388 |  * not increment the iterator. | 
 | 3389 |  */ | 
 | 3390 | struct ring_buffer_event * | 
 | 3391 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | 
 | 3392 | { | 
 | 3393 | 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
 | 3394 | 	struct ring_buffer_event *event; | 
 | 3395 | 	unsigned long flags; | 
 | 3396 |  | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3397 |  again: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3398 | 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3399 | 	event = rb_iter_peek(iter, ts); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3400 | 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3401 |  | 
| Steven Rostedt | 1b959e1 | 2009-09-03 10:12:13 -0400 | [diff] [blame] | 3402 | 	if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3403 | 		goto again; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3404 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3405 | 	return event; | 
 | 3406 | } | 
 | 3407 |  | 
 | 3408 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3409 |  * ring_buffer_consume - return an event and consume it | 
 | 3410 |  * @buffer: The ring buffer to get the next event from | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3411 |  * @cpu: the cpu to read the buffer from | 
 | 3412 |  * @ts: a variable to store the timestamp (may be NULL) | 
 | 3413 |  * @lost_events: a variable to store if events were lost (may be NULL) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3414 |  * | 
 | 3415 |  * Returns the next event in the ring buffer, and that event is consumed. | 
 | 3416 |  * Meaning that sequential reads will keep returning a different event, | 
 | 3417 |  * and eventually empty the ring buffer if the producer is slower. | 
 | 3418 |  */ | 
 | 3419 | struct ring_buffer_event * | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3420 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | 
 | 3421 | 		    unsigned long *lost_events) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3422 | { | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3423 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 3424 | 	struct ring_buffer_event *event = NULL; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3425 | 	unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3426 | 	int dolock; | 
 | 3427 |  | 
 | 3428 | 	dolock = rb_ok_to_lock(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3429 |  | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3430 |  again: | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3431 | 	/* might be called in atomic */ | 
 | 3432 | 	preempt_disable(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3433 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3434 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
 | 3435 | 		goto out; | 
 | 3436 |  | 
 | 3437 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3438 | 	local_irq_save(flags); | 
 | 3439 | 	if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3440 | 		raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3441 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3442 | 	event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 
 | 3443 | 	if (event) { | 
 | 3444 | 		cpu_buffer->lost_events = 0; | 
| Robert Richter | 469535a | 2009-07-30 19:19:18 +0200 | [diff] [blame] | 3445 | 		rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3446 | 	} | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3447 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3448 | 	if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3449 | 		raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3450 | 	local_irq_restore(flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3451 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3452 |  out: | 
 | 3453 | 	preempt_enable(); | 
 | 3454 |  | 
| Steven Rostedt | 1b959e1 | 2009-09-03 10:12:13 -0400 | [diff] [blame] | 3455 | 	if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3456 | 		goto again; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3457 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3458 | 	return event; | 
 | 3459 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3460 | EXPORT_SYMBOL_GPL(ring_buffer_consume); | 
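 |  | /* | 
 |  |  * Illustrative only, not part of this file: a minimal consuming-read | 
 |  |  * loop over one cpu might look roughly like the sketch below, assuming | 
 |  |  * the ring_buffer_event_data() accessor from <linux/ring_buffer.h> and | 
 |  |  * a hypothetical process() consumer of the payload: | 
 |  |  * | 
 |  |  *	struct ring_buffer_event *event; | 
 |  |  *	unsigned long lost; | 
 |  |  *	u64 ts; | 
 |  |  * | 
 |  |  *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) | 
 |  |  *		process(ring_buffer_event_data(event), ts, lost); | 
 |  |  */ | 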
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3461 |  | 
 | 3462 | /** | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3463 |  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3464 |  * @buffer: The ring buffer to read from | 
 | 3465 |  * @cpu: The cpu buffer to iterate over | 
 | 3466 |  * | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3467 |  * This performs the initial preparations necessary to iterate | 
 | 3468 |  * through the buffer.  Memory is allocated, buffer recording | 
 | 3469 |  * is disabled, and the iterator pointer is returned to the caller. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3470 |  * | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3471 |  * Disabling buffer recording prevents the reading from being | 
 | 3472 |  * corrupted. This is not a consuming read, so a producer is not | 
 | 3473 |  * expected. | 
 | 3474 |  * | 
 | 3475 |  * After a sequence of ring_buffer_read_prepare calls, the user is | 
 | 3476 |  * expected to make at least one call to ring_buffer_read_prepare_sync. | 
 | 3477 |  * Afterwards, ring_buffer_read_start is invoked to get things going | 
 | 3478 |  * for real. | 
 | 3479 |  * | 
 | 3480 |  * This overall must be paired with ring_buffer_read_finish; an | 
 |  |  * illustrative usage sketch follows this function below. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3481 |  */ | 
 | 3482 | struct ring_buffer_iter * | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3483 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3484 | { | 
 | 3485 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3486 | 	struct ring_buffer_iter *iter; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3487 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3488 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3489 | 		return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3490 |  | 
 | 3491 | 	iter = kmalloc(sizeof(*iter), GFP_KERNEL); | 
 | 3492 | 	if (!iter) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3493 | 		return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3494 |  | 
 | 3495 | 	cpu_buffer = buffer->buffers[cpu]; | 
 | 3496 |  | 
 | 3497 | 	iter->cpu_buffer = cpu_buffer; | 
 | 3498 |  | 
 | 3499 | 	atomic_inc(&cpu_buffer->record_disabled); | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3500 |  | 
 | 3501 | 	return iter; | 
 | 3502 | } | 
 | 3503 | EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); | 
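 |  | /* | 
 |  |  * Illustrative only, not part of this file: the prepare/sync/start | 
 |  |  * sequence described above might be driven roughly as follows, where | 
 |  |  * dump_event() is a hypothetical consumer of the payload: | 
 |  |  * | 
 |  |  *	struct ring_buffer_iter *iter; | 
 |  |  *	struct ring_buffer_event *event; | 
 |  |  *	u64 ts; | 
 |  |  * | 
 |  |  *	iter = ring_buffer_read_prepare(buffer, cpu); | 
 |  |  *	if (!iter) | 
 |  |  *		return; | 
 |  |  *	ring_buffer_read_prepare_sync(); | 
 |  |  *	ring_buffer_read_start(iter); | 
 |  |  *	while ((event = ring_buffer_read(iter, &ts))) | 
 |  |  *		dump_event(ring_buffer_event_data(event), ts); | 
 |  |  *	ring_buffer_read_finish(iter); | 
 |  |  */ | 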
 | 3504 |  | 
 | 3505 | /** | 
 | 3506 |  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls | 
 | 3507 |  * | 
 | 3508 |  * All previously invoked ring_buffer_read_prepare calls to prepare | 
 | 3509 |  * iterators will be synchronized.  Afterwards, ring_buffer_read_start | 
 | 3510 |  * calls on those iterators are allowed. | 
 | 3511 |  */ | 
 | 3512 | void | 
 | 3513 | ring_buffer_read_prepare_sync(void) | 
 | 3514 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3515 | 	synchronize_sched(); | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3516 | } | 
 | 3517 | EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); | 
 | 3518 |  | 
 | 3519 | /** | 
 | 3520 |  * ring_buffer_read_start - start a non consuming read of the buffer | 
 | 3521 |  * @iter: The iterator returned by ring_buffer_read_prepare | 
 | 3522 |  * | 
 | 3523 |  * This finalizes the startup of an iteration through the buffer. | 
 | 3524 |  * The iterator comes from a call to ring_buffer_read_prepare and | 
 | 3525 |  * an intervening ring_buffer_read_prepare_sync must have been | 
 | 3526 |  * performed. | 
 | 3527 |  * | 
 | 3528 |  * Must be paired with ring_buffer_read_finish. | 
 | 3529 |  */ | 
 | 3530 | void | 
 | 3531 | ring_buffer_read_start(struct ring_buffer_iter *iter) | 
 | 3532 | { | 
 | 3533 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
 | 3534 | 	unsigned long flags; | 
 | 3535 |  | 
 | 3536 | 	if (!iter) | 
 | 3537 | 		return; | 
 | 3538 |  | 
 | 3539 | 	cpu_buffer = iter->cpu_buffer; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3540 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3541 | 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3542 | 	arch_spin_lock(&cpu_buffer->lock); | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 3543 | 	rb_iter_reset(iter); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3544 | 	arch_spin_unlock(&cpu_buffer->lock); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3545 | 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3546 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3547 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3548 |  | 
 | 3549 | /** | 
 | 3550 |  * ring_buffer_read_finish - finish reading the iterator of the buffer | 
 | 3551 |  * @iter: The iterator retrieved by ring_buffer_read_start | 
 | 3552 |  * | 
 | 3553 |  * This re-enables the recording to the buffer, and frees the | 
 | 3554 |  * iterator. | 
 | 3555 |  */ | 
 | 3556 | void | 
 | 3557 | ring_buffer_read_finish(struct ring_buffer_iter *iter) | 
 | 3558 | { | 
 | 3559 | 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
 | 3560 |  | 
 | 3561 | 	atomic_dec(&cpu_buffer->record_disabled); | 
 | 3562 | 	kfree(iter); | 
 | 3563 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3564 | EXPORT_SYMBOL_GPL(ring_buffer_read_finish); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3565 |  | 
 | 3566 | /** | 
 | 3567 |  * ring_buffer_read - read the next item in the ring buffer by the iterator | 
 | 3568 |  * @iter: The ring buffer iterator | 
 | 3569 |  * @ts: The time stamp of the event read. | 
 | 3570 |  * | 
 | 3571 |  * This reads the next event in the ring buffer and increments the iterator. | 
 | 3572 |  */ | 
 | 3573 | struct ring_buffer_event * | 
 | 3574 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | 
 | 3575 | { | 
 | 3576 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3577 | 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
 | 3578 | 	unsigned long flags; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3579 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3580 | 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7e9391c | 2009-09-03 10:02:09 -0400 | [diff] [blame] | 3581 |  again: | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3582 | 	event = rb_iter_peek(iter, ts); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3583 | 	if (!event) | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3584 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3585 |  | 
| Steven Rostedt | 7e9391c | 2009-09-03 10:02:09 -0400 | [diff] [blame] | 3586 | 	if (event->type_len == RINGBUF_TYPE_PADDING) | 
 | 3587 | 		goto again; | 
 | 3588 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3589 | 	rb_advance_iter(iter); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3590 |  out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3591 | 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3592 |  | 
 | 3593 | 	return event; | 
 | 3594 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3595 | EXPORT_SYMBOL_GPL(ring_buffer_read); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3596 |  | 
 | 3597 | /** | 
 | 3598 |  * ring_buffer_size - return the size of the ring buffer (in bytes) | 
 | 3599 |  * @buffer: The ring buffer. | 
 | 3600 |  */ | 
 | 3601 | unsigned long ring_buffer_size(struct ring_buffer *buffer) | 
 | 3602 | { | 
 | 3603 | 	return BUF_PAGE_SIZE * buffer->pages; | 
 | 3604 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3605 | EXPORT_SYMBOL_GPL(ring_buffer_size); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3606 |  | 
 | 3607 | static void | 
 | 3608 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | 
 | 3609 | { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3610 | 	rb_head_page_deactivate(cpu_buffer); | 
 | 3611 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3612 | 	cpu_buffer->head_page | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 3613 | 		= list_entry(cpu_buffer->pages, struct buffer_page, list); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3614 | 	local_set(&cpu_buffer->head_page->write, 0); | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 3615 | 	local_set(&cpu_buffer->head_page->entries, 0); | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 3616 | 	local_set(&cpu_buffer->head_page->page->commit, 0); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3617 |  | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 3618 | 	cpu_buffer->head_page->read = 0; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3619 |  | 
 | 3620 | 	cpu_buffer->tail_page = cpu_buffer->head_page; | 
 | 3621 | 	cpu_buffer->commit_page = cpu_buffer->head_page; | 
 | 3622 |  | 
 | 3623 | 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 
 | 3624 | 	local_set(&cpu_buffer->reader_page->write, 0); | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 3625 | 	local_set(&cpu_buffer->reader_page->entries, 0); | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 3626 | 	local_set(&cpu_buffer->reader_page->page->commit, 0); | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 3627 | 	cpu_buffer->reader_page->read = 0; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3628 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3629 | 	local_set(&cpu_buffer->commit_overrun, 0); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 3630 | 	local_set(&cpu_buffer->entries_bytes, 0); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3631 | 	local_set(&cpu_buffer->overrun, 0); | 
| Steven Rostedt | e4906ef | 2009-04-30 20:49:44 -0400 | [diff] [blame] | 3632 | 	local_set(&cpu_buffer->entries, 0); | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 3633 | 	local_set(&cpu_buffer->committing, 0); | 
 | 3634 | 	local_set(&cpu_buffer->commits, 0); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3635 | 	cpu_buffer->read = 0; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 3636 | 	cpu_buffer->read_bytes = 0; | 
| Steven Rostedt | 69507c0 | 2009-01-21 18:45:57 -0500 | [diff] [blame] | 3637 |  | 
 | 3638 | 	cpu_buffer->write_stamp = 0; | 
 | 3639 | 	cpu_buffer->read_stamp = 0; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3640 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3641 | 	cpu_buffer->lost_events = 0; | 
 | 3642 | 	cpu_buffer->last_overrun = 0; | 
 | 3643 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3644 | 	rb_head_page_activate(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3645 | } | 
 | 3646 |  | 
 | 3647 | /** | 
 | 3648 |  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer | 
 | 3649 |  * @buffer: The ring buffer to reset a per cpu buffer of | 
 | 3650 |  * @cpu: The CPU buffer to be reset | 
 | 3651 |  */ | 
 | 3652 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | 
 | 3653 | { | 
 | 3654 | 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 
 | 3655 | 	unsigned long flags; | 
 | 3656 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3657 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3658 | 		return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3659 |  | 
| Steven Rostedt | 41ede23 | 2009-05-01 20:26:54 -0400 | [diff] [blame] | 3660 | 	atomic_inc(&cpu_buffer->record_disabled); | 
 | 3661 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3662 | 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3663 |  | 
| Steven Rostedt | 41b6a95 | 2009-09-02 09:59:48 -0400 | [diff] [blame] | 3664 | 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) | 
 | 3665 | 		goto out; | 
 | 3666 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3667 | 	arch_spin_lock(&cpu_buffer->lock); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3668 |  | 
 | 3669 | 	rb_reset_cpu(cpu_buffer); | 
 | 3670 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3671 | 	arch_spin_unlock(&cpu_buffer->lock); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3672 |  | 
| Steven Rostedt | 41b6a95 | 2009-09-02 09:59:48 -0400 | [diff] [blame] | 3673 |  out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3674 | 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 41ede23 | 2009-05-01 20:26:54 -0400 | [diff] [blame] | 3675 |  | 
 | 3676 | 	atomic_dec(&cpu_buffer->record_disabled); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3677 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3678 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3679 |  | 
 | 3680 | /** | 
 | 3681 |  * ring_buffer_reset - reset a ring buffer | 
 | 3682 |  * @buffer: The ring buffer to reset all cpu buffers | 
 | 3683 |  */ | 
 | 3684 | void ring_buffer_reset(struct ring_buffer *buffer) | 
 | 3685 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3686 | 	int cpu; | 
 | 3687 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3688 | 	for_each_buffer_cpu(buffer, cpu) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3689 | 		ring_buffer_reset_cpu(buffer, cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3690 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3691 | EXPORT_SYMBOL_GPL(ring_buffer_reset); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3692 |  | 
 | 3693 | /** | 
 | 3694 |  * ring_buffer_empty - is the ring buffer empty? | 
 | 3695 |  * @buffer: The ring buffer to test | 
 | 3696 |  */ | 
 | 3697 | int ring_buffer_empty(struct ring_buffer *buffer) | 
 | 3698 | { | 
 | 3699 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 3700 | 	unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3701 | 	int dolock; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3702 | 	int cpu; | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 3703 | 	int ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3704 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3705 | 	dolock = rb_ok_to_lock(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3706 |  | 
 | 3707 | 	/* yes this is racy, but if you don't like the race, lock the buffer */ | 
 | 3708 | 	for_each_buffer_cpu(buffer, cpu) { | 
 | 3709 | 		cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3710 | 		local_irq_save(flags); | 
 | 3711 | 		if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3712 | 			raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 3713 | 		ret = rb_per_cpu_empty(cpu_buffer); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3714 | 		if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3715 | 			raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3716 | 		local_irq_restore(flags); | 
 | 3717 |  | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 3718 | 		if (!ret) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3719 | 			return 0; | 
 | 3720 | 	} | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3721 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3722 | 	return 1; | 
 | 3723 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3724 | EXPORT_SYMBOL_GPL(ring_buffer_empty); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3725 |  | 
 | 3726 | /** | 
 | 3727 |  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? | 
 | 3728 |  * @buffer: The ring buffer | 
 | 3729 |  * @cpu: The CPU buffer to test | 
 | 3730 |  */ | 
 | 3731 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | 
 | 3732 | { | 
 | 3733 | 	struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 3734 | 	unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3735 | 	int dolock; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3736 | 	int ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3737 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3738 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3739 | 		return 1; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3740 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3741 | 	dolock = rb_ok_to_lock(); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3742 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3743 | 	cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3744 | 	local_irq_save(flags); | 
 | 3745 | 	if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3746 | 		raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3747 | 	ret = rb_per_cpu_empty(cpu_buffer); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3748 | 	if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3749 | 		raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3750 | 	local_irq_restore(flags); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3751 |  | 
 | 3752 | 	return ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3753 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3754 | EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3755 |  | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 3756 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3757 | /** | 
 | 3758 |  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers | 
 | 3759 |  * @buffer_a: One buffer to swap with | 
 | 3760 |  * @buffer_b: The other buffer to swap with | 
 | 3761 |  * | 
 | 3762 |  * This function is useful for tracers that want to take a "snapshot" | 
 | 3763 |  * of a CPU buffer and have another backup buffer lying around. | 
 | 3764 |  * It is expected that the tracer handles the cpu buffer not being | 
 | 3765 |  * used at the moment. | 
 | 3766 |  */ | 
 | 3767 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | 
 | 3768 | 			 struct ring_buffer *buffer_b, int cpu) | 
 | 3769 | { | 
 | 3770 | 	struct ring_buffer_per_cpu *cpu_buffer_a; | 
 | 3771 | 	struct ring_buffer_per_cpu *cpu_buffer_b; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3772 | 	int ret = -EINVAL; | 
 | 3773 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3774 | 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || | 
 | 3775 | 	    !cpumask_test_cpu(cpu, buffer_b->cpumask)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3776 | 		goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3777 |  | 
 | 3778 | 	/* At least make sure the two buffers are somewhat the same */ | 
| Lai Jiangshan | 6d102bc | 2008-12-17 17:48:23 +0800 | [diff] [blame] | 3779 | 	if (buffer_a->pages != buffer_b->pages) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3780 | 		goto out; | 
 | 3781 |  | 
 | 3782 | 	ret = -EAGAIN; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3783 |  | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 3784 | 	if (ring_buffer_flags != RB_BUFFERS_ON) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3785 | 		goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 3786 |  | 
 | 3787 | 	if (atomic_read(&buffer_a->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3788 | 		goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 3789 |  | 
 | 3790 | 	if (atomic_read(&buffer_b->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3791 | 		goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 3792 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3793 | 	cpu_buffer_a = buffer_a->buffers[cpu]; | 
 | 3794 | 	cpu_buffer_b = buffer_b->buffers[cpu]; | 
 | 3795 |  | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 3796 | 	if (atomic_read(&cpu_buffer_a->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3797 | 		goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 3798 |  | 
 | 3799 | 	if (atomic_read(&cpu_buffer_b->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3800 | 		goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 3801 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3802 | 	/* | 
 | 3803 | 	 * We can't do a synchronize_sched here because this | 
 | 3804 | 	 * function can be called in atomic context. | 
 | 3805 | 	 * Normally this will be called from the same CPU as cpu. | 
 | 3806 | 	 * If not, it is up to the caller to protect this. | 
 | 3807 | 	 */ | 
 | 3808 | 	atomic_inc(&cpu_buffer_a->record_disabled); | 
 | 3809 | 	atomic_inc(&cpu_buffer_b->record_disabled); | 
 | 3810 |  | 
| Steven Rostedt | 9827799 | 2009-09-02 10:56:15 -0400 | [diff] [blame] | 3811 | 	ret = -EBUSY; | 
 | 3812 | 	if (local_read(&cpu_buffer_a->committing)) | 
 | 3813 | 		goto out_dec; | 
 | 3814 | 	if (local_read(&cpu_buffer_b->committing)) | 
 | 3815 | 		goto out_dec; | 
 | 3816 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3817 | 	buffer_a->buffers[cpu] = cpu_buffer_b; | 
 | 3818 | 	buffer_b->buffers[cpu] = cpu_buffer_a; | 
 | 3819 |  | 
 | 3820 | 	cpu_buffer_b->buffer = buffer_a; | 
 | 3821 | 	cpu_buffer_a->buffer = buffer_b; | 
 | 3822 |  | 
| Steven Rostedt | 9827799 | 2009-09-02 10:56:15 -0400 | [diff] [blame] | 3823 | 	ret = 0; | 
 | 3824 |  | 
 | 3825 | out_dec: | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3826 | 	atomic_dec(&cpu_buffer_a->record_disabled); | 
 | 3827 | 	atomic_dec(&cpu_buffer_b->record_disabled); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3828 | out: | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3829 | 	return ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3830 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3831 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 3832 | #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3833 |  | 
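/*
 * A rough usage sketch: a tracer that keeps a spare "snapshot" buffer
 * around can swap one CPU's pages out from under the live buffer with
 * ring_buffer_swap_cpu().  The names snap_buffer and live_buffer are
 * made up for the example; the error codes are the ones returned above.
 *
 *	ret = ring_buffer_swap_cpu(snap_buffer, live_buffer, cpu);
 *	if (ret)
 *		return ret;
 *
 * -EINVAL means @cpu is not in both cpumasks or the buffers differ in
 * size, -EAGAIN means recording is disabled, and -EBUSY means a writer
 * was mid-commit; on 0, snap_buffer now holds the pages that were live
 * on @cpu.
 */
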
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3834 | /** | 
 | 3835 |  * ring_buffer_alloc_read_page - allocate a page to read from buffer | 
 | 3836 |  * @buffer: the buffer to allocate for. | 
 | 3837 |  * | 
 | 3838 |  * This function is used in conjunction with ring_buffer_read_page. | 
 | 3839 |  * When reading a full page from the ring buffer, these functions | 
 | 3840 |  * can be used to speed up the process. The calling function should | 
 | 3841 |  * allocate a few pages first with this function. Then when it | 
 | 3842 |  * needs to get pages from the ring buffer, it passes the result | 
 | 3843 |  * of this function into ring_buffer_read_page, which will swap | 
 | 3844 |  * the page that was allocated with the read page of the buffer. | 
 | 3845 |  * | 
 | 3846 |  * Returns: | 
 | 3847 |  *  The page allocated, or NULL on error. | 
 | 3848 |  */ | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 3849 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3850 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 3851 | 	struct buffer_data_page *bpage; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 3852 | 	struct page *page; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3853 |  | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 3854 | 	page = alloc_pages_node(cpu_to_node(cpu), | 
 | 3855 | 				GFP_KERNEL | __GFP_NORETRY, 0); | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 3856 | 	if (!page) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3857 | 		return NULL; | 
 | 3858 |  | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 3859 | 	bpage = page_address(page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3860 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3861 | 	rb_init_page(bpage); | 
 | 3862 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 3863 | 	return bpage; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3864 | } | 
| Steven Rostedt | d6ce96d | 2009-05-05 01:15:24 -0400 | [diff] [blame] | 3865 | EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3866 |  | 
 | 3867 | /** | 
 | 3868 |  * ring_buffer_free_read_page - free an allocated read page | 
 | 3869 |  * @buffer: the buffer the page was allocated for | 
 | 3870 |  * @data: the page to free | 
 | 3871 |  * | 
 | 3872 |  * Free a page allocated from ring_buffer_alloc_read_page. | 
 | 3873 |  */ | 
 | 3874 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | 
 | 3875 | { | 
 | 3876 | 	free_page((unsigned long)data); | 
 | 3877 | } | 
| Steven Rostedt | d6ce96d | 2009-05-05 01:15:24 -0400 | [diff] [blame] | 3878 | EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3879 |  | 
 | 3880 | /** | 
 | 3881 |  * ring_buffer_read_page - extract a page from the ring buffer | 
 | 3882 |  * @buffer: buffer to extract from | 
 | 3883 |  * @data_page: the page to use, allocated from ring_buffer_alloc_read_page | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3884 |  * @len: amount to extract | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3885 |  * @cpu: the cpu of the buffer to extract | 
 | 3886 |  * @full: should the extraction only happen when the page is full. | 
 | 3887 |  * | 
 | 3888 |  * This function will pull out a page from the ring buffer and consume it. | 
 | 3889 |  * @data_page must be the address of the variable that was returned | 
 | 3890 |  * from ring_buffer_alloc_read_page. This is because the page might be used | 
 | 3891 |  * to swap with a page in the ring buffer. | 
 | 3892 |  * | 
 | 3893 |  * for example: | 
| Lai Jiangshan | b85fa01 | 2009-02-09 14:21:14 +0800 | [diff] [blame] | 3894 |  *	rpage = ring_buffer_alloc_read_page(buffer, cpu); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3895 |  *	if (!rpage) | 
 | 3896 |  *		return error; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3897 |  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 3898 |  *	if (ret >= 0) | 
 | 3899 |  *		process_page(rpage, ret); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3900 |  * | 
 | 3901 |  * When @full is set, the function will not return data unless | 
 | 3902 |  * the writer is off the reader page. | 
 | 3903 |  * | 
 | 3904 |  * Note: it is up to the calling functions to handle sleeps and wakeups. | 
 | 3905 |  *  The ring buffer can be used anywhere in the kernel and can not | 
 | 3906 |  *  blindly call wake_up. The layer that uses the ring buffer must be | 
 | 3907 |  *  responsible for that. | 
 | 3908 |  * | 
 | 3909 |  * Returns: | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 3910 |  *  >=0 if data has been transferred, returns the offset of consumed data. | 
 | 3911 |  *  <0 if no data has been transferred. | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3912 |  */ | 
 | 3913 | int ring_buffer_read_page(struct ring_buffer *buffer, | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3914 | 			  void **data_page, size_t len, int cpu, int full) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3915 | { | 
 | 3916 | 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 
 | 3917 | 	struct ring_buffer_event *event; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 3918 | 	struct buffer_data_page *bpage; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3919 | 	struct buffer_page *reader; | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 3920 | 	unsigned long missed_events; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3921 | 	unsigned long flags; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3922 | 	unsigned int commit; | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 3923 | 	unsigned int read; | 
| Steven Rostedt | 4f3640f | 2009-03-03 23:52:42 -0500 | [diff] [blame] | 3924 | 	u64 save_timestamp; | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 3925 | 	int ret = -1; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3926 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3927 | 	if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
 | 3928 | 		goto out; | 
 | 3929 |  | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 3930 | 	/* | 
 | 3931 | 	 * If len is not big enough to hold the page header, then | 
 | 3932 | 	 * we can not copy anything. | 
 | 3933 | 	 */ | 
 | 3934 | 	if (len <= BUF_PAGE_HDR_SIZE) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3935 | 		goto out; | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 3936 |  | 
 | 3937 | 	len -= BUF_PAGE_HDR_SIZE; | 
 | 3938 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3939 | 	if (!data_page) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3940 | 		goto out; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3941 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 3942 | 	bpage = *data_page; | 
 | 3943 | 	if (!bpage) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3944 | 		goto out; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3945 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3946 | 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3947 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3948 | 	reader = rb_get_reader_page(cpu_buffer); | 
 | 3949 | 	if (!reader) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3950 | 		goto out_unlock; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3951 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3952 | 	event = rb_reader_event(cpu_buffer); | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 3953 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3954 | 	read = reader->read; | 
 | 3955 | 	commit = rb_page_commit(reader); | 
 | 3956 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3957 | 	/* Check if any events were dropped */ | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 3958 | 	missed_events = cpu_buffer->lost_events; | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3959 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3960 | 	/* | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 3961 | 	 * If this page has been partially read or | 
 | 3962 | 	 * if len is not big enough to read the rest of the page or | 
 | 3963 | 	 * a writer is still on the page, then | 
 | 3964 | 	 * we must copy the data from the page to the buffer. | 
 | 3965 | 	 * Otherwise, we can simply swap the page with the one passed in. | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3966 | 	 */ | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 3967 | 	if (read || (len < (commit - read)) || | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3968 | 	    cpu_buffer->reader_page == cpu_buffer->commit_page) { | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 3969 | 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page; | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 3970 | 		unsigned int rpos = read; | 
 | 3971 | 		unsigned int pos = 0; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3972 | 		unsigned int size; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3973 |  | 
 | 3974 | 		if (full) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3975 | 			goto out_unlock; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 3976 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3977 | 		if (len > (commit - read)) | 
 | 3978 | 			len = (commit - read); | 
 | 3979 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3980 | 		/* Always keep the time extend and data together */ | 
 | 3981 | 		size = rb_event_ts_length(event); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3982 |  | 
 | 3983 | 		if (len < size) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3984 | 			goto out_unlock; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3985 |  | 
| Steven Rostedt | 4f3640f | 2009-03-03 23:52:42 -0500 | [diff] [blame] | 3986 | 		/* save the current timestamp, since the user will need it */ | 
 | 3987 | 		save_timestamp = cpu_buffer->read_stamp; | 
 | 3988 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3989 | 		/* Need to copy one event at a time */ | 
 | 3990 | 		do { | 
| David Sharp | e1e3592 | 2010-12-22 16:38:24 -0800 | [diff] [blame] | 3991 | 			/* We need the size of one event, because | 
 | 3992 | 			 * rb_advance_reader only advances by one event, | 
 | 3993 | 			 * whereas rb_event_ts_length may include the size of | 
 | 3994 | 			 * one or two events. | 
 | 3995 | 			 * We have already ensured there's enough space if this | 
 | 3996 | 			 * is a time extend. */ | 
 | 3997 | 			size = rb_event_length(event); | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 3998 | 			memcpy(bpage->data + pos, rpage->data + rpos, size); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 3999 |  | 
 | 4000 | 			len -= size; | 
 | 4001 |  | 
 | 4002 | 			rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4003 | 			rpos = reader->read; | 
 | 4004 | 			pos += size; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4005 |  | 
| Huang Ying | 18fab91 | 2010-07-28 14:14:01 +0800 | [diff] [blame] | 4006 | 			if (rpos >= commit) | 
 | 4007 | 				break; | 
 | 4008 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4009 | 			event = rb_reader_event(cpu_buffer); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 4010 | 			/* Always keep the time extend and data together */ | 
 | 4011 | 			size = rb_event_ts_length(event); | 
| David Sharp | e1e3592 | 2010-12-22 16:38:24 -0800 | [diff] [blame] | 4012 | 		} while (len >= size); | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4013 |  | 
 | 4014 | 		/* update bpage */ | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4015 | 		local_set(&bpage->commit, pos); | 
| Steven Rostedt | 4f3640f | 2009-03-03 23:52:42 -0500 | [diff] [blame] | 4016 | 		bpage->time_stamp = save_timestamp; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4017 |  | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4018 | 		/* we copied everything to the beginning */ | 
 | 4019 | 		read = 0; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4020 | 	} else { | 
| Steven Rostedt | afbab76 | 2009-05-01 19:40:05 -0400 | [diff] [blame] | 4021 | 		/* update the entry counter */ | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 4022 | 		cpu_buffer->read += rb_page_entries(reader); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4023 | 		cpu_buffer->read_bytes += BUF_PAGE_SIZE; | 
| Steven Rostedt | afbab76 | 2009-05-01 19:40:05 -0400 | [diff] [blame] | 4024 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4025 | 		/* swap the pages */ | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4026 | 		rb_init_page(bpage); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4027 | 		bpage = reader->page; | 
 | 4028 | 		reader->page = *data_page; | 
 | 4029 | 		local_set(&reader->write, 0); | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 4030 | 		local_set(&reader->entries, 0); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4031 | 		reader->read = 0; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4032 | 		*data_page = bpage; | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4033 |  | 
 | 4034 | 		/* | 
 | 4035 | 		 * Use the real_end for the data size. | 
 | 4036 | 		 * This gives us a chance to store the lost events | 
 | 4037 | 		 * on the page. | 
 | 4038 | 		 */ | 
 | 4039 | 		if (reader->real_end) | 
 | 4040 | 			local_set(&bpage->commit, reader->real_end); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4041 | 	} | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4042 | 	ret = read; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4043 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4044 | 	cpu_buffer->lost_events = 0; | 
| Steven Rostedt | 2711ca2 | 2010-05-21 13:32:26 -0400 | [diff] [blame] | 4045 |  | 
 | 4046 | 	commit = local_read(&bpage->commit); | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4047 | 	/* | 
 | 4048 | 	 * Set a flag in the commit field if we lost events | 
 | 4049 | 	 */ | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4050 | 	if (missed_events) { | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4051 | 		/* If there is room at the end of the page to save the | 
 | 4052 | 		 * missed events, then record it there. | 
 | 4053 | 		 */ | 
 | 4054 | 		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { | 
 | 4055 | 			memcpy(&bpage->data[commit], &missed_events, | 
 | 4056 | 			       sizeof(missed_events)); | 
 | 4057 | 			local_add(RB_MISSED_STORED, &bpage->commit); | 
| Steven Rostedt | 2711ca2 | 2010-05-21 13:32:26 -0400 | [diff] [blame] | 4058 | 			commit += sizeof(missed_events); | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4059 | 		} | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4060 | 		local_add(RB_MISSED_EVENTS, &bpage->commit); | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4061 | 	} | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4062 |  | 
| Steven Rostedt | 2711ca2 | 2010-05-21 13:32:26 -0400 | [diff] [blame] | 4063 | 	/* | 
 | 4064 | 	 * This page may be off to user land. Zero it out here. | 
 | 4065 | 	 */ | 
 | 4066 | 	if (commit < BUF_PAGE_SIZE) | 
 | 4067 | 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); | 
 | 4068 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4069 |  out_unlock: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4070 | 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4071 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4072 |  out: | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4073 | 	return ret; | 
 | 4074 | } | 
| Steven Rostedt | d6ce96d | 2009-05-05 01:15:24 -0400 | [diff] [blame] | 4075 | EXPORT_SYMBOL_GPL(ring_buffer_read_page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4076 |  | 
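/*
 * A rough usage sketch tying the three helpers above together: allocate
 * a spare page, pull data into it (by copy or by swapping it into the
 * ring buffer), then free it.  process_page() is a made-up consumer of
 * the returned data; passing PAGE_SIZE as @len lets a full page, header
 * included, be read.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return -ENOMEM;
 *
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 *	ring_buffer_free_read_page(buffer, rpage);
 */
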
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 4077 | #ifdef CONFIG_HOTPLUG_CPU | 
| Frederic Weisbecker | 09c9e84 | 2009-03-21 04:33:36 +0100 | [diff] [blame] | 4078 | static int rb_cpu_notify(struct notifier_block *self, | 
 | 4079 | 			 unsigned long action, void *hcpu) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4080 | { | 
 | 4081 | 	struct ring_buffer *buffer = | 
 | 4082 | 		container_of(self, struct ring_buffer, cpu_notify); | 
 | 4083 | 	long cpu = (long)hcpu; | 
 | 4084 |  | 
 | 4085 | 	switch (action) { | 
 | 4086 | 	case CPU_UP_PREPARE: | 
 | 4087 | 	case CPU_UP_PREPARE_FROZEN: | 
| Rusty Russell | 3f237a7 | 2009-06-12 21:15:30 +0930 | [diff] [blame] | 4088 | 		if (cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4089 | 			return NOTIFY_OK; | 
 | 4090 |  | 
 | 4091 | 		buffer->buffers[cpu] = | 
 | 4092 | 			rb_allocate_cpu_buffer(buffer, cpu); | 
 | 4093 | 		if (!buffer->buffers[cpu]) { | 
 | 4094 | 			WARN(1, "failed to allocate ring buffer on CPU %ld\n", | 
 | 4095 | 			     cpu); | 
 | 4096 | 			return NOTIFY_OK; | 
 | 4097 | 		} | 
 | 4098 | 		smp_wmb(); | 
| Rusty Russell | 3f237a7 | 2009-06-12 21:15:30 +0930 | [diff] [blame] | 4099 | 		cpumask_set_cpu(cpu, buffer->cpumask); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4100 | 		break; | 
 | 4101 | 	case CPU_DOWN_PREPARE: | 
 | 4102 | 	case CPU_DOWN_PREPARE_FROZEN: | 
 | 4103 | 		/* | 
 | 4104 | 		 * Do nothing. | 
 | 4105 | 		 *  If we were to free the buffer, then the user would | 
 | 4106 | 		 *  lose any trace that was in the buffer. | 
 | 4107 | 		 */ | 
 | 4108 | 		break; | 
 | 4109 | 	default: | 
 | 4110 | 		break; | 
 | 4111 | 	} | 
 | 4112 | 	return NOTIFY_OK; | 
 | 4113 | } | 
 | 4114 | #endif |
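/*
 * A rough sketch of how the notifier above is expected to be wired up:
 * cpu_notify is the notifier_block embedded in struct ring_buffer (see
 * the container_of() in rb_cpu_notify()), and registration would happen
 * when the buffer is allocated.  This is an assumption about code that
 * lives elsewhere in this file, not a quote of it.
 *
 *	buffer->cpu_notify.notifier_call = rb_cpu_notify;
 *	buffer->cpu_notify.priority = 0;
 *	register_cpu_notifier(&buffer->cpu_notify);
 */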