| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1 | /* | 
|  | 2 | * Generic ring buffer | 
|  | 3 | * | 
|  | 4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | 
|  | 5 | */ | 
| Steven Rostedt | 0b07436 | 2013-01-22 16:58:30 -0500 | [diff] [blame] | 6 | #include <linux/ftrace_event.h> | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 7 | #include <linux/ring_buffer.h> | 
| Ingo Molnar | 14131f2 | 2009-02-26 18:47:11 +0100 | [diff] [blame] | 8 | #include <linux/trace_clock.h> | 
| Steven Rostedt | 0b07436 | 2013-01-22 16:58:30 -0500 | [diff] [blame] | 9 | #include <linux/trace_seq.h> | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 10 | #include <linux/spinlock.h> | 
|  | 11 | #include <linux/debugfs.h> | 
|  | 12 | #include <linux/uaccess.h> | 
| Steven Rostedt | a81bd80 | 2009-02-06 01:45:16 -0500 | [diff] [blame] | 13 | #include <linux/hardirq.h> | 
| Vegard Nossum | 1744a21 | 2009-02-28 08:29:44 +0100 | [diff] [blame] | 14 | #include <linux/kmemcheck.h> | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 15 | #include <linux/module.h> | 
|  | 16 | #include <linux/percpu.h> | 
|  | 17 | #include <linux/mutex.h> | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 18 | #include <linux/slab.h> | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 19 | #include <linux/init.h> | 
|  | 20 | #include <linux/hash.h> | 
|  | 21 | #include <linux/list.h> | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 22 | #include <linux/cpu.h> | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 23 | #include <linux/fs.h> | 
|  | 24 |  | 
| Christoph Lameter | 7961576 | 2010-01-05 15:34:50 +0900 | [diff] [blame] | 25 | #include <asm/local.h> | 
| Steven Rostedt | 182e9f5 | 2008-11-03 23:15:56 -0500 | [diff] [blame] | 26 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 27 | static void update_pages_handler(struct work_struct *work); | 
|  | 28 |  | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 29 | /* | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 30 | * The ring buffer header is special. We must manually up keep it. | 
|  | 31 | */ | 
|  | 32 | int ring_buffer_print_entry_header(struct trace_seq *s) | 
|  | 33 | { | 
|  | 34 | int ret; | 
|  | 35 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 36 | ret = trace_seq_printf(s, "# compressed entry header\n"); | 
|  | 37 | ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n"); | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 38 | ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n"); | 
|  | 39 | ret = trace_seq_printf(s, "\tarray       :   32 bits\n"); | 
|  | 40 | ret = trace_seq_printf(s, "\n"); | 
|  | 41 | ret = trace_seq_printf(s, "\tpadding     : type == %d\n", | 
|  | 42 | RINGBUF_TYPE_PADDING); | 
|  | 43 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", | 
|  | 44 | RINGBUF_TYPE_TIME_EXTEND); | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 45 | ret = trace_seq_printf(s, "\tdata max type_len  == %d\n", | 
|  | 46 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX); | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 47 |  | 
|  | 48 | return ret; | 
|  | 49 | } | 
|  | 50 |  | 
|  | 51 | /* | 
| Steven Rostedt | 5cc9854 | 2009-03-12 22:24:17 -0400 | [diff] [blame] | 52 | * The ring buffer is made up of a list of pages. A separate list of pages is | 
|  | 53 | * allocated for each CPU. A writer may only write to a buffer that is | 
|  | 54 | * associated with the CPU it is currently executing on.  A reader may read | 
|  | 55 | * from any per cpu buffer. | 
|  | 56 | * | 
|  | 57 | * The reader is special. For each per cpu buffer, the reader has its own | 
|  | 58 | * reader page. When a reader has read the entire reader page, this reader | 
|  | 59 | * page is swapped with another page in the ring buffer. | 
|  | 60 | * | 
|  | 61 | * Now, as long as the writer is off the reader page, the reader can do what | 
|  | 62 | * ever it wants with that page. The writer will never write to that page | 
|  | 63 | * again (as long as it is out of the ring buffer). | 
|  | 64 | * | 
|  | 65 | * Here's some silly ASCII art. | 
|  | 66 | * | 
|  | 67 | *   +------+ | 
|  | 68 | *   |reader|          RING BUFFER | 
|  | 69 | *   |page  | | 
|  | 70 | *   +------+        +---+   +---+   +---+ | 
|  | 71 | *                   |   |-->|   |-->|   | | 
|  | 72 | *                   +---+   +---+   +---+ | 
|  | 73 | *                     ^               | | 
|  | 74 | *                     |               | | 
|  | 75 | *                     +---------------+ | 
|  | 76 | * | 
|  | 77 | * | 
|  | 78 | *   +------+ | 
|  | 79 | *   |reader|          RING BUFFER | 
|  | 80 | *   |page  |------------------v | 
|  | 81 | *   +------+        +---+   +---+   +---+ | 
|  | 82 | *                   |   |-->|   |-->|   | | 
|  | 83 | *                   +---+   +---+   +---+ | 
|  | 84 | *                     ^               | | 
|  | 85 | *                     |               | | 
|  | 86 | *                     +---------------+ | 
|  | 87 | * | 
|  | 88 | * | 
|  | 89 | *   +------+ | 
|  | 90 | *   |reader|          RING BUFFER | 
|  | 91 | *   |page  |------------------v | 
|  | 92 | *   +------+        +---+   +---+   +---+ | 
|  | 93 | *      ^            |   |-->|   |-->|   | | 
|  | 94 | *      |            +---+   +---+   +---+ | 
|  | 95 | *      |                              | | 
|  | 96 | *      |                              | | 
|  | 97 | *      +------------------------------+ | 
|  | 98 | * | 
|  | 99 | * | 
|  | 100 | *   +------+ | 
|  | 101 | *   |buffer|          RING BUFFER | 
|  | 102 | *   |page  |------------------v | 
|  | 103 | *   +------+        +---+   +---+   +---+ | 
|  | 104 | *      ^            |   |   |   |-->|   | | 
|  | 105 | *      |   New      +---+   +---+   +---+ | 
|  | 106 | *      |  Reader------^               | | 
|  | 107 | *      |   page                       | | 
|  | 108 | *      +------------------------------+ | 
|  | 109 | * | 
|  | 110 | * | 
|  | 111 | * After we make this swap, the reader can hand this page off to the splice | 
|  | 112 | * code and be done with it. It can even allocate a new page if it needs to | 
|  | 113 | * and swap that into the ring buffer. | 
|  | 114 | * | 
|  | 115 | * We will be using cmpxchg soon to make all this lockless. | 
|  | 116 | * | 
|  | 117 | */ | 
|  | 118 |  | 
|  | 119 | /* | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 120 | * A fast way to enable or disable all ring buffers is to | 
|  | 121 | * call tracing_on or tracing_off. Turning off the ring buffers | 
|  | 122 | * prevents all ring buffers from being recorded to. | 
|  | 123 | * Turning this switch on, makes it OK to write to the | 
|  | 124 | * ring buffer, if the ring buffer is enabled itself. | 
|  | 125 | * | 
|  | 126 | * There's three layers that must be on in order to write | 
|  | 127 | * to the ring buffer. | 
|  | 128 | * | 
|  | 129 | * 1) This global flag must be set. | 
|  | 130 | * 2) The ring buffer must be enabled for recording. | 
|  | 131 | * 3) The per cpu buffer must be enabled for recording. | 
|  | 132 | * | 
|  | 133 | * In case of an anomaly, this global flag has a bit set that | 
|  | 134 | * will permantly disable all ring buffers. | 
|  | 135 | */ | 
|  | 136 |  | 
|  | 137 | /* | 
|  | 138 | * Global flag to disable all recording to ring buffers | 
|  | 139 | *  This has two bits: ON, DISABLED | 
|  | 140 | * | 
|  | 141 | *  ON   DISABLED | 
|  | 142 | * ---- ---------- | 
|  | 143 | *   0      0        : ring buffers are off | 
|  | 144 | *   1      0        : ring buffers are on | 
|  | 145 | *   X      1        : ring buffers are permanently disabled | 
|  | 146 | */ | 
|  | 147 |  | 
|  | 148 | enum { | 
|  | 149 | RB_BUFFERS_ON_BIT	= 0, | 
|  | 150 | RB_BUFFERS_DISABLED_BIT	= 1, | 
|  | 151 | }; | 
|  | 152 |  | 
|  | 153 | enum { | 
|  | 154 | RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT, | 
|  | 155 | RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT, | 
|  | 156 | }; | 
|  | 157 |  | 
| Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 158 | static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; | 
| Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 159 |  | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 160 | /* Used for individual buffers (after the counter) */ | 
|  | 161 | #define RB_BUFFER_OFF		(1 << 20) | 
|  | 162 |  | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 163 | #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) | 
|  | 164 |  | 
| Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 165 | /** | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 166 | * tracing_off_permanent - permanently disable ring buffers | 
|  | 167 | * | 
|  | 168 | * This function, once called, will disable all ring buffers | 
| Wenji Huang | c3706f0 | 2009-02-10 01:03:18 -0500 | [diff] [blame] | 169 | * permanently. | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 170 | */ | 
|  | 171 | void tracing_off_permanent(void) | 
|  | 172 | { | 
|  | 173 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); | 
| Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 174 | } | 
|  | 175 |  | 
| Steven Rostedt | e3d6bf0 | 2009-03-03 13:53:07 -0500 | [diff] [blame] | 176 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) | 
| Andrew Morton | 67d3472 | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 177 | #define RB_ALIGNMENT		4U | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 178 | #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 179 | #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */ | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 180 |  | 
| Steven Rostedt | 2271048 | 2010-03-18 17:54:19 -0400 | [diff] [blame] | 181 | #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | 
|  | 182 | # define RB_FORCE_8BYTE_ALIGNMENT	0 | 
|  | 183 | # define RB_ARCH_ALIGNMENT		RB_ALIGNMENT | 
|  | 184 | #else | 
|  | 185 | # define RB_FORCE_8BYTE_ALIGNMENT	1 | 
|  | 186 | # define RB_ARCH_ALIGNMENT		8U | 
|  | 187 | #endif | 
|  | 188 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 189 | /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ | 
|  | 190 | #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 191 |  | 
|  | 192 | enum { | 
|  | 193 | RB_LEN_TIME_EXTEND = 8, | 
|  | 194 | RB_LEN_TIME_STAMP = 16, | 
|  | 195 | }; | 
|  | 196 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 197 | #define skip_time_extend(event) \ | 
|  | 198 | ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) | 
|  | 199 |  | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 200 | static inline int rb_null_event(struct ring_buffer_event *event) | 
|  | 201 | { | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 202 | return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 203 | } | 
|  | 204 |  | 
|  | 205 | static void rb_event_set_padding(struct ring_buffer_event *event) | 
|  | 206 | { | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 207 | /* padding has a NULL time_delta */ | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 208 | event->type_len = RINGBUF_TYPE_PADDING; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 209 | event->time_delta = 0; | 
|  | 210 | } | 
|  | 211 |  | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 212 | static unsigned | 
|  | 213 | rb_event_data_length(struct ring_buffer_event *event) | 
|  | 214 | { | 
|  | 215 | unsigned length; | 
|  | 216 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 217 | if (event->type_len) | 
|  | 218 | length = event->type_len * RB_ALIGNMENT; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 219 | else | 
|  | 220 | length = event->array[0]; | 
|  | 221 | return length + RB_EVNT_HDR_SIZE; | 
|  | 222 | } | 
|  | 223 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 224 | /* | 
|  | 225 | * Return the length of the given event. Will return | 
|  | 226 | * the length of the time extend if the event is a | 
|  | 227 | * time extend. | 
|  | 228 | */ | 
|  | 229 | static inline unsigned | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 230 | rb_event_length(struct ring_buffer_event *event) | 
|  | 231 | { | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 232 | switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 233 | case RINGBUF_TYPE_PADDING: | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 234 | if (rb_null_event(event)) | 
|  | 235 | /* undefined */ | 
|  | 236 | return -1; | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 237 | return  event->array[0] + RB_EVNT_HDR_SIZE; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 238 |  | 
|  | 239 | case RINGBUF_TYPE_TIME_EXTEND: | 
|  | 240 | return RB_LEN_TIME_EXTEND; | 
|  | 241 |  | 
|  | 242 | case RINGBUF_TYPE_TIME_STAMP: | 
|  | 243 | return RB_LEN_TIME_STAMP; | 
|  | 244 |  | 
|  | 245 | case RINGBUF_TYPE_DATA: | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 246 | return rb_event_data_length(event); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 247 | default: | 
|  | 248 | BUG(); | 
|  | 249 | } | 
|  | 250 | /* not hit */ | 
|  | 251 | return 0; | 
|  | 252 | } | 
|  | 253 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 254 | /* | 
|  | 255 | * Return total length of time extend and data, | 
|  | 256 | *   or just the event length for all other events. | 
|  | 257 | */ | 
|  | 258 | static inline unsigned | 
|  | 259 | rb_event_ts_length(struct ring_buffer_event *event) | 
|  | 260 | { | 
|  | 261 | unsigned len = 0; | 
|  | 262 |  | 
|  | 263 | if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { | 
|  | 264 | /* time extends include the data event after it */ | 
|  | 265 | len = RB_LEN_TIME_EXTEND; | 
|  | 266 | event = skip_time_extend(event); | 
|  | 267 | } | 
|  | 268 | return len + rb_event_length(event); | 
|  | 269 | } | 
|  | 270 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 271 | /** | 
|  | 272 | * ring_buffer_event_length - return the length of the event | 
|  | 273 | * @event: the event to get the length of | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 274 | * | 
|  | 275 | * Returns the size of the data load of a data event. | 
|  | 276 | * If the event is something other than a data event, it | 
|  | 277 | * returns the size of the event itself. With the exception | 
|  | 278 | * of a TIME EXTEND, where it still returns the size of the | 
|  | 279 | * data load of the data event after it. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 280 | */ | 
|  | 281 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) | 
|  | 282 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 283 | unsigned length; | 
|  | 284 |  | 
|  | 285 | if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) | 
|  | 286 | event = skip_time_extend(event); | 
|  | 287 |  | 
|  | 288 | length = rb_event_length(event); | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 289 | if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 
| Robert Richter | 465634a | 2009-01-07 15:32:11 +0100 | [diff] [blame] | 290 | return length; | 
|  | 291 | length -= RB_EVNT_HDR_SIZE; | 
|  | 292 | if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) | 
|  | 293 | length -= sizeof(event->array[0]); | 
|  | 294 | return length; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 295 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 296 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 297 |  | 
|  | 298 | /* inline for ring buffer fast paths */ | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 299 | static void * | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 300 | rb_event_data(struct ring_buffer_event *event) | 
|  | 301 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 302 | if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) | 
|  | 303 | event = skip_time_extend(event); | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 304 | BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 305 | /* If length is in len field, then array[0] has the data */ | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 306 | if (event->type_len) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 307 | return (void *)&event->array[0]; | 
|  | 308 | /* Otherwise length is in array[0] and array[1] has the data */ | 
|  | 309 | return (void *)&event->array[1]; | 
|  | 310 | } | 
|  | 311 |  | 
|  | 312 | /** | 
|  | 313 | * ring_buffer_event_data - return the data of the event | 
|  | 314 | * @event: the event to get the data from | 
|  | 315 | */ | 
|  | 316 | void *ring_buffer_event_data(struct ring_buffer_event *event) | 
|  | 317 | { | 
|  | 318 | return rb_event_data(event); | 
|  | 319 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 320 | EXPORT_SYMBOL_GPL(ring_buffer_event_data); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 321 |  | 
|  | 322 | #define for_each_buffer_cpu(buffer, cpu)		\ | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 323 | for_each_cpu(cpu, buffer->cpumask) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 324 |  | 
|  | 325 | #define TS_SHIFT	27 | 
|  | 326 | #define TS_MASK		((1ULL << TS_SHIFT) - 1) | 
|  | 327 | #define TS_DELTA_TEST	(~TS_MASK) | 
|  | 328 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 329 | /* Flag when events were overwritten */ | 
|  | 330 | #define RB_MISSED_EVENTS	(1 << 31) | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 331 | /* Missed count stored at end */ | 
|  | 332 | #define RB_MISSED_STORED	(1 << 30) | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 333 |  | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 334 | struct buffer_data_page { | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 335 | u64		 time_stamp;	/* page time stamp */ | 
| Wenji Huang | c3706f0 | 2009-02-10 01:03:18 -0500 | [diff] [blame] | 336 | local_t		 commit;	/* write committed index */ | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 337 | unsigned char	 data[];	/* data of buffer page */ | 
|  | 338 | }; | 
|  | 339 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 340 | /* | 
|  | 341 | * Note, the buffer_page list must be first. The buffer pages | 
|  | 342 | * are allocated in cache lines, which means that each buffer | 
|  | 343 | * page will be at the beginning of a cache line, and thus | 
|  | 344 | * the least significant bits will be zero. We use this to | 
|  | 345 | * add flags in the list struct pointers, to make the ring buffer | 
|  | 346 | * lockless. | 
|  | 347 | */ | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 348 | struct buffer_page { | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 349 | struct list_head list;		/* list of buffer pages */ | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 350 | local_t		 write;		/* index for next write */ | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 351 | unsigned	 read;		/* index for next read */ | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 352 | local_t		 entries;	/* entries on this page */ | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 353 | unsigned long	 real_end;	/* real end of data */ | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 354 | struct buffer_data_page *page;	/* Actual data page */ | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 355 | }; | 
|  | 356 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 357 | /* | 
|  | 358 | * The buffer page counters, write and entries, must be reset | 
|  | 359 | * atomically when crossing page boundaries. To synchronize this | 
|  | 360 | * update, two counters are inserted into the number. One is | 
|  | 361 | * the actual counter for the write position or count on the page. | 
|  | 362 | * | 
|  | 363 | * The other is a counter of updaters. Before an update happens | 
|  | 364 | * the update partition of the counter is incremented. This will | 
|  | 365 | * allow the updater to update the counter atomically. | 
|  | 366 | * | 
|  | 367 | * The counter is 20 bits, and the state data is 12. | 
|  | 368 | */ | 
|  | 369 | #define RB_WRITE_MASK		0xfffff | 
|  | 370 | #define RB_WRITE_INTCNT		(1 << 20) | 
|  | 371 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 372 | static void rb_init_page(struct buffer_data_page *bpage) | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 373 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 374 | local_set(&bpage->commit, 0); | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 375 | } | 
|  | 376 |  | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 377 | /** | 
|  | 378 | * ring_buffer_page_len - the size of data on the page. | 
|  | 379 | * @page: The page to read | 
|  | 380 | * | 
|  | 381 | * Returns the amount of data on the page, including buffer page header. | 
|  | 382 | */ | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 383 | size_t ring_buffer_page_len(void *page) | 
|  | 384 | { | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 385 | return local_read(&((struct buffer_data_page *)page)->commit) | 
|  | 386 | + BUF_PAGE_HDR_SIZE; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 387 | } | 
|  | 388 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 389 | /* | 
| Steven Rostedt | ed56829 | 2008-09-29 23:02:40 -0400 | [diff] [blame] | 390 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 
|  | 391 | * this issue out. | 
|  | 392 | */ | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 393 | static void free_buffer_page(struct buffer_page *bpage) | 
| Steven Rostedt | ed56829 | 2008-09-29 23:02:40 -0400 | [diff] [blame] | 394 | { | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 395 | free_page((unsigned long)bpage->page); | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 396 | kfree(bpage); | 
| Steven Rostedt | ed56829 | 2008-09-29 23:02:40 -0400 | [diff] [blame] | 397 | } | 
|  | 398 |  | 
|  | 399 | /* | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 400 | * We need to fit the time_stamp delta into 27 bits. | 
|  | 401 | */ | 
|  | 402 | static inline int test_time_stamp(u64 delta) | 
|  | 403 | { | 
|  | 404 | if (delta & TS_DELTA_TEST) | 
|  | 405 | return 1; | 
|  | 406 | return 0; | 
|  | 407 | } | 
|  | 408 |  | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 409 | #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 410 |  | 
| Steven Rostedt | be957c4 | 2009-05-11 14:42:53 -0400 | [diff] [blame] | 411 | /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ | 
|  | 412 | #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) | 
|  | 413 |  | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 414 | int ring_buffer_print_page_header(struct trace_seq *s) | 
|  | 415 | { | 
|  | 416 | struct buffer_data_page field; | 
|  | 417 | int ret; | 
|  | 418 |  | 
|  | 419 | ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" | 
| Tom Zanussi | 26a5074 | 2009-10-06 01:09:50 -0500 | [diff] [blame] | 420 | "offset:0;\tsize:%u;\tsigned:%u;\n", | 
|  | 421 | (unsigned int)sizeof(field.time_stamp), | 
|  | 422 | (unsigned int)is_signed_type(u64)); | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 423 |  | 
|  | 424 | ret = trace_seq_printf(s, "\tfield: local_t commit;\t" | 
| Tom Zanussi | 26a5074 | 2009-10-06 01:09:50 -0500 | [diff] [blame] | 425 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 426 | (unsigned int)offsetof(typeof(field), commit), | 
| Tom Zanussi | 26a5074 | 2009-10-06 01:09:50 -0500 | [diff] [blame] | 427 | (unsigned int)sizeof(field.commit), | 
|  | 428 | (unsigned int)is_signed_type(long)); | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 429 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 430 | ret = trace_seq_printf(s, "\tfield: int overwrite;\t" | 
|  | 431 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | 
|  | 432 | (unsigned int)offsetof(typeof(field), commit), | 
|  | 433 | 1, | 
|  | 434 | (unsigned int)is_signed_type(long)); | 
|  | 435 |  | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 436 | ret = trace_seq_printf(s, "\tfield: char data;\t" | 
| Tom Zanussi | 26a5074 | 2009-10-06 01:09:50 -0500 | [diff] [blame] | 437 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 438 | (unsigned int)offsetof(typeof(field), data), | 
| Tom Zanussi | 26a5074 | 2009-10-06 01:09:50 -0500 | [diff] [blame] | 439 | (unsigned int)BUF_PAGE_SIZE, | 
|  | 440 | (unsigned int)is_signed_type(char)); | 
| Steven Rostedt | d1b182a | 2009-04-15 16:53:47 -0400 | [diff] [blame] | 441 |  | 
|  | 442 | return ret; | 
|  | 443 | } | 
|  | 444 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 445 | /* | 
|  | 446 | * head_page == tail_page && head == tail then buffer is empty. | 
|  | 447 | */ | 
|  | 448 | struct ring_buffer_per_cpu { | 
|  | 449 | int				cpu; | 
| Richard Kennedy | 985023d | 2010-03-25 11:27:36 +0000 | [diff] [blame] | 450 | atomic_t			record_disabled; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 451 | struct ring_buffer		*buffer; | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 452 | raw_spinlock_t			reader_lock;	/* serialize readers */ | 
| Thomas Gleixner | 445c895 | 2009-12-02 19:49:50 +0100 | [diff] [blame] | 453 | arch_spinlock_t			lock; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 454 | struct lock_class_key		lock_key; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 455 | unsigned int			nr_pages; | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 456 | struct list_head		*pages; | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 457 | struct buffer_page		*head_page;	/* read from head */ | 
|  | 458 | struct buffer_page		*tail_page;	/* write to tail */ | 
| Wenji Huang | c3706f0 | 2009-02-10 01:03:18 -0500 | [diff] [blame] | 459 | struct buffer_page		*commit_page;	/* committed pages */ | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 460 | struct buffer_page		*reader_page; | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 461 | unsigned long			lost_events; | 
|  | 462 | unsigned long			last_overrun; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 463 | local_t				entries_bytes; | 
| Steven Rostedt | e4906ef | 2009-04-30 20:49:44 -0400 | [diff] [blame] | 464 | local_t				entries; | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 465 | local_t				overrun; | 
|  | 466 | local_t				commit_overrun; | 
|  | 467 | local_t				dropped_events; | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 468 | local_t				committing; | 
|  | 469 | local_t				commits; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 470 | unsigned long			read; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 471 | unsigned long			read_bytes; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 472 | u64				write_stamp; | 
|  | 473 | u64				read_stamp; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 474 | /* ring buffer pages to update, > 0 to add, < 0 to remove */ | 
|  | 475 | int				nr_pages_to_update; | 
|  | 476 | struct list_head		new_pages; /* new pages to add */ | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 477 | struct work_struct		update_pages_work; | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 478 | struct completion		update_done; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 479 | }; | 
|  | 480 |  | 
|  | 481 | struct ring_buffer { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 482 | unsigned			flags; | 
|  | 483 | int				cpus; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 484 | atomic_t			record_disabled; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 485 | atomic_t			resize_disabled; | 
| Arnaldo Carvalho de Melo | 00f62f6 | 2009-02-09 17:04:06 -0200 | [diff] [blame] | 486 | cpumask_var_t			cpumask; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 487 |  | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 488 | struct lock_class_key		*reader_lock_key; | 
|  | 489 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 490 | struct mutex			mutex; | 
|  | 491 |  | 
|  | 492 | struct ring_buffer_per_cpu	**buffers; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 493 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 494 | #ifdef CONFIG_HOTPLUG_CPU | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 495 | struct notifier_block		cpu_notify; | 
|  | 496 | #endif | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 497 | u64				(*clock)(void); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 498 | }; | 
|  | 499 |  | 
|  | 500 | struct ring_buffer_iter { | 
|  | 501 | struct ring_buffer_per_cpu	*cpu_buffer; | 
|  | 502 | unsigned long			head; | 
|  | 503 | struct buffer_page		*head_page; | 
| Steven Rostedt | 492a74f | 2010-01-25 15:17:47 -0500 | [diff] [blame] | 504 | struct buffer_page		*cache_reader_page; | 
|  | 505 | unsigned long			cache_read; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 506 | u64				read_stamp; | 
|  | 507 | }; | 
|  | 508 |  | 
| Steven Rostedt | f536aaf | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 509 | /* buffer may be either ring_buffer or ring_buffer_per_cpu */ | 
| Steven Rostedt | 077c540 | 2009-09-03 19:53:46 -0400 | [diff] [blame] | 510 | #define RB_WARN_ON(b, cond)						\ | 
|  | 511 | ({								\ | 
|  | 512 | int _____ret = unlikely(cond);				\ | 
|  | 513 | if (_____ret) {						\ | 
|  | 514 | if (__same_type(*(b), struct ring_buffer_per_cpu)) { \ | 
|  | 515 | struct ring_buffer_per_cpu *__b =	\ | 
|  | 516 | (void *)b;			\ | 
|  | 517 | atomic_inc(&__b->buffer->record_disabled); \ | 
|  | 518 | } else						\ | 
|  | 519 | atomic_inc(&b->record_disabled);	\ | 
|  | 520 | WARN_ON(1);					\ | 
|  | 521 | }							\ | 
|  | 522 | _____ret;						\ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 523 | }) | 
| Steven Rostedt | f536aaf | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 524 |  | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 525 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 
|  | 526 | #define DEBUG_SHIFT 0 | 
|  | 527 |  | 
| Jiri Olsa | 6d3f1e1 | 2009-10-23 19:36:19 -0400 | [diff] [blame] | 528 | static inline u64 rb_time_stamp(struct ring_buffer *buffer) | 
| Steven Rostedt | 88eb012 | 2009-05-11 16:28:23 -0400 | [diff] [blame] | 529 | { | 
|  | 530 | /* shift to debug/test normalization and TIME_EXTENTS */ | 
|  | 531 | return buffer->clock() << DEBUG_SHIFT; | 
|  | 532 | } | 
|  | 533 |  | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 534 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) | 
|  | 535 | { | 
|  | 536 | u64 time; | 
|  | 537 |  | 
|  | 538 | preempt_disable_notrace(); | 
| Jiri Olsa | 6d3f1e1 | 2009-10-23 19:36:19 -0400 | [diff] [blame] | 539 | time = rb_time_stamp(buffer); | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 540 | preempt_enable_no_resched_notrace(); | 
|  | 541 |  | 
|  | 542 | return time; | 
|  | 543 | } | 
|  | 544 | EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); | 
|  | 545 |  | 
|  | 546 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, | 
|  | 547 | int cpu, u64 *ts) | 
|  | 548 | { | 
|  | 549 | /* Just stupid testing the normalize function and deltas */ | 
|  | 550 | *ts >>= DEBUG_SHIFT; | 
|  | 551 | } | 
|  | 552 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); | 
|  | 553 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 554 | /* | 
|  | 555 | * Making the ring buffer lockless makes things tricky. | 
|  | 556 | * Although writes only happen on the CPU that they are on, | 
|  | 557 | * and they only need to worry about interrupts. Reads can | 
|  | 558 | * happen on any CPU. | 
|  | 559 | * | 
|  | 560 | * The reader page is always off the ring buffer, but when the | 
|  | 561 | * reader finishes with a page, it needs to swap its page with | 
|  | 562 | * a new one from the buffer. The reader needs to take from | 
|  | 563 | * the head (writes go to the tail). But if a writer is in overwrite | 
|  | 564 | * mode and wraps, it must push the head page forward. | 
|  | 565 | * | 
|  | 566 | * Here lies the problem. | 
|  | 567 | * | 
|  | 568 | * The reader must be careful to replace only the head page, and | 
|  | 569 | * not another one. As described at the top of the file in the | 
|  | 570 | * ASCII art, the reader sets its old page to point to the next | 
|  | 571 | * page after head. It then sets the page after head to point to | 
|  | 572 | * the old reader page. But if the writer moves the head page | 
|  | 573 | * during this operation, the reader could end up with the tail. | 
|  | 574 | * | 
|  | 575 | * We use cmpxchg to help prevent this race. We also do something | 
|  | 576 | * special with the page before head. We set the LSB to 1. | 
|  | 577 | * | 
|  | 578 | * When the writer must push the page forward, it will clear the | 
|  | 579 | * bit that points to the head page, move the head, and then set | 
|  | 580 | * the bit that points to the new head page. | 
|  | 581 | * | 
|  | 582 | * We also don't want an interrupt coming in and moving the head | 
|  | 583 | * page on another writer. Thus we use the second LSB to catch | 
|  | 584 | * that too. Thus: | 
|  | 585 | * | 
|  | 586 | * head->list->prev->next        bit 1          bit 0 | 
|  | 587 | *                              -------        ------- | 
|  | 588 | * Normal page                     0              0 | 
|  | 589 | * Points to head page             0              1 | 
|  | 590 | * New head page                   1              0 | 
|  | 591 | * | 
|  | 592 | * Note we can not trust the prev pointer of the head page, because: | 
|  | 593 | * | 
|  | 594 | * +----+       +-----+        +-----+ | 
|  | 595 | * |    |------>|  T  |---X--->|  N  | | 
|  | 596 | * |    |<------|     |        |     | | 
|  | 597 | * +----+       +-----+        +-----+ | 
|  | 598 | *   ^                           ^ | | 
|  | 599 | *   |          +-----+          | | | 
|  | 600 | *   +----------|  R  |----------+ | | 
|  | 601 | *              |     |<-----------+ | 
|  | 602 | *              +-----+ | 
|  | 603 | * | 
|  | 604 | * Key:  ---X-->  HEAD flag set in pointer | 
|  | 605 | *         T      Tail page | 
|  | 606 | *         R      Reader page | 
|  | 607 | *         N      Next page | 
|  | 608 | * | 
|  | 609 | * (see __rb_reserve_next() to see where this happens) | 
|  | 610 | * | 
|  | 611 | *  What the above shows is that the reader just swapped out | 
|  | 612 | *  the reader page with a page in the buffer, but before it | 
|  | 613 | *  could make the new header point back to the new page added | 
|  | 614 | *  it was preempted by a writer. The writer moved forward onto | 
|  | 615 | *  the new page added by the reader and is about to move forward | 
|  | 616 | *  again. | 
|  | 617 | * | 
|  | 618 | *  You can see, it is legitimate for the previous pointer of | 
|  | 619 | *  the head (or any page) not to point back to itself. But only | 
|  | 620 | *  temporarially. | 
|  | 621 | */ | 
|  | 622 |  | 
|  | 623 | #define RB_PAGE_NORMAL		0UL | 
|  | 624 | #define RB_PAGE_HEAD		1UL | 
|  | 625 | #define RB_PAGE_UPDATE		2UL | 
|  | 626 |  | 
|  | 627 |  | 
|  | 628 | #define RB_FLAG_MASK		3UL | 
|  | 629 |  | 
|  | 630 | /* PAGE_MOVED is not part of the mask */ | 
|  | 631 | #define RB_PAGE_MOVED		4UL | 
|  | 632 |  | 
|  | 633 | /* | 
|  | 634 | * rb_list_head - remove any bit | 
|  | 635 | */ | 
|  | 636 | static struct list_head *rb_list_head(struct list_head *list) | 
|  | 637 | { | 
|  | 638 | unsigned long val = (unsigned long)list; | 
|  | 639 |  | 
|  | 640 | return (struct list_head *)(val & ~RB_FLAG_MASK); | 
|  | 641 | } | 
|  | 642 |  | 
|  | 643 | /* | 
| Jiri Olsa | 6d3f1e1 | 2009-10-23 19:36:19 -0400 | [diff] [blame] | 644 | * rb_is_head_page - test if the given page is the head page | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 645 | * | 
|  | 646 | * Because the reader may move the head_page pointer, we can | 
|  | 647 | * not trust what the head page is (it may be pointing to | 
|  | 648 | * the reader page). But if the next page is a header page, | 
|  | 649 | * its flags will be non zero. | 
|  | 650 | */ | 
| Jesper Juhl | 42b16b3 | 2011-01-17 00:09:38 +0100 | [diff] [blame] | 651 | static inline int | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 652 | rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 653 | struct buffer_page *page, struct list_head *list) | 
|  | 654 | { | 
|  | 655 | unsigned long val; | 
|  | 656 |  | 
|  | 657 | val = (unsigned long)list->next; | 
|  | 658 |  | 
|  | 659 | if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) | 
|  | 660 | return RB_PAGE_MOVED; | 
|  | 661 |  | 
|  | 662 | return val & RB_FLAG_MASK; | 
|  | 663 | } | 
|  | 664 |  | 
|  | 665 | /* | 
|  | 666 | * rb_is_reader_page | 
|  | 667 | * | 
|  | 668 | * The unique thing about the reader page, is that, if the | 
|  | 669 | * writer is ever on it, the previous pointer never points | 
|  | 670 | * back to the reader page. | 
|  | 671 | */ | 
|  | 672 | static int rb_is_reader_page(struct buffer_page *page) | 
|  | 673 | { | 
|  | 674 | struct list_head *list = page->list.prev; | 
|  | 675 |  | 
|  | 676 | return rb_list_head(list->next) != &page->list; | 
|  | 677 | } | 
|  | 678 |  | 
|  | 679 | /* | 
|  | 680 | * rb_set_list_to_head - set a list_head to be pointing to head. | 
|  | 681 | */ | 
|  | 682 | static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 683 | struct list_head *list) | 
|  | 684 | { | 
|  | 685 | unsigned long *ptr; | 
|  | 686 |  | 
|  | 687 | ptr = (unsigned long *)&list->next; | 
|  | 688 | *ptr |= RB_PAGE_HEAD; | 
|  | 689 | *ptr &= ~RB_PAGE_UPDATE; | 
|  | 690 | } | 
|  | 691 |  | 
|  | 692 | /* | 
|  | 693 | * rb_head_page_activate - sets up head page | 
|  | 694 | */ | 
|  | 695 | static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 696 | { | 
|  | 697 | struct buffer_page *head; | 
|  | 698 |  | 
|  | 699 | head = cpu_buffer->head_page; | 
|  | 700 | if (!head) | 
|  | 701 | return; | 
|  | 702 |  | 
|  | 703 | /* | 
|  | 704 | * Set the previous list pointer to have the HEAD flag. | 
|  | 705 | */ | 
|  | 706 | rb_set_list_to_head(cpu_buffer, head->list.prev); | 
|  | 707 | } | 
|  | 708 |  | 
|  | 709 | static void rb_list_head_clear(struct list_head *list) | 
|  | 710 | { | 
|  | 711 | unsigned long *ptr = (unsigned long *)&list->next; | 
|  | 712 |  | 
|  | 713 | *ptr &= ~RB_FLAG_MASK; | 
|  | 714 | } | 
|  | 715 |  | 
|  | 716 | /* | 
|  | 717 | * rb_head_page_dactivate - clears head page ptr (for free list) | 
|  | 718 | */ | 
|  | 719 | static void | 
|  | 720 | rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 721 | { | 
|  | 722 | struct list_head *hd; | 
|  | 723 |  | 
|  | 724 | /* Go through the whole list and clear any pointers found. */ | 
|  | 725 | rb_list_head_clear(cpu_buffer->pages); | 
|  | 726 |  | 
|  | 727 | list_for_each(hd, cpu_buffer->pages) | 
|  | 728 | rb_list_head_clear(hd); | 
|  | 729 | } | 
|  | 730 |  | 
|  | 731 | static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 732 | struct buffer_page *head, | 
|  | 733 | struct buffer_page *prev, | 
|  | 734 | int old_flag, int new_flag) | 
|  | 735 | { | 
|  | 736 | struct list_head *list; | 
|  | 737 | unsigned long val = (unsigned long)&head->list; | 
|  | 738 | unsigned long ret; | 
|  | 739 |  | 
|  | 740 | list = &prev->list; | 
|  | 741 |  | 
|  | 742 | val &= ~RB_FLAG_MASK; | 
|  | 743 |  | 
| Steven Rostedt | 08a4081 | 2009-09-14 09:31:35 -0400 | [diff] [blame] | 744 | ret = cmpxchg((unsigned long *)&list->next, | 
|  | 745 | val | old_flag, val | new_flag); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 746 |  | 
|  | 747 | /* check if the reader took the page */ | 
|  | 748 | if ((ret & ~RB_FLAG_MASK) != val) | 
|  | 749 | return RB_PAGE_MOVED; | 
|  | 750 |  | 
|  | 751 | return ret & RB_FLAG_MASK; | 
|  | 752 | } | 
|  | 753 |  | 
|  | 754 | static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 755 | struct buffer_page *head, | 
|  | 756 | struct buffer_page *prev, | 
|  | 757 | int old_flag) | 
|  | 758 | { | 
|  | 759 | return rb_head_page_set(cpu_buffer, head, prev, | 
|  | 760 | old_flag, RB_PAGE_UPDATE); | 
|  | 761 | } | 
|  | 762 |  | 
|  | 763 | static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 764 | struct buffer_page *head, | 
|  | 765 | struct buffer_page *prev, | 
|  | 766 | int old_flag) | 
|  | 767 | { | 
|  | 768 | return rb_head_page_set(cpu_buffer, head, prev, | 
|  | 769 | old_flag, RB_PAGE_HEAD); | 
|  | 770 | } | 
|  | 771 |  | 
|  | 772 | static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 773 | struct buffer_page *head, | 
|  | 774 | struct buffer_page *prev, | 
|  | 775 | int old_flag) | 
|  | 776 | { | 
|  | 777 | return rb_head_page_set(cpu_buffer, head, prev, | 
|  | 778 | old_flag, RB_PAGE_NORMAL); | 
|  | 779 | } | 
|  | 780 |  | 
|  | 781 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 782 | struct buffer_page **bpage) | 
|  | 783 | { | 
|  | 784 | struct list_head *p = rb_list_head((*bpage)->list.next); | 
|  | 785 |  | 
|  | 786 | *bpage = list_entry(p, struct buffer_page, list); | 
|  | 787 | } | 
|  | 788 |  | 
|  | 789 | static struct buffer_page * | 
|  | 790 | rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 791 | { | 
|  | 792 | struct buffer_page *head; | 
|  | 793 | struct buffer_page *page; | 
|  | 794 | struct list_head *list; | 
|  | 795 | int i; | 
|  | 796 |  | 
|  | 797 | if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) | 
|  | 798 | return NULL; | 
|  | 799 |  | 
|  | 800 | /* sanity check */ | 
|  | 801 | list = cpu_buffer->pages; | 
|  | 802 | if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) | 
|  | 803 | return NULL; | 
|  | 804 |  | 
|  | 805 | page = head = cpu_buffer->head_page; | 
|  | 806 | /* | 
|  | 807 | * It is possible that the writer moves the header behind | 
|  | 808 | * where we started, and we miss in one loop. | 
|  | 809 | * A second loop should grab the header, but we'll do | 
|  | 810 | * three loops just because I'm paranoid. | 
|  | 811 | */ | 
|  | 812 | for (i = 0; i < 3; i++) { | 
|  | 813 | do { | 
|  | 814 | if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { | 
|  | 815 | cpu_buffer->head_page = page; | 
|  | 816 | return page; | 
|  | 817 | } | 
|  | 818 | rb_inc_page(cpu_buffer, &page); | 
|  | 819 | } while (page != head); | 
|  | 820 | } | 
|  | 821 |  | 
|  | 822 | RB_WARN_ON(cpu_buffer, 1); | 
|  | 823 |  | 
|  | 824 | return NULL; | 
|  | 825 | } | 
|  | 826 |  | 
|  | 827 | static int rb_head_page_replace(struct buffer_page *old, | 
|  | 828 | struct buffer_page *new) | 
|  | 829 | { | 
|  | 830 | unsigned long *ptr = (unsigned long *)&old->list.prev->next; | 
|  | 831 | unsigned long val; | 
|  | 832 | unsigned long ret; | 
|  | 833 |  | 
|  | 834 | val = *ptr & ~RB_FLAG_MASK; | 
|  | 835 | val |= RB_PAGE_HEAD; | 
|  | 836 |  | 
| Steven Rostedt | 08a4081 | 2009-09-14 09:31:35 -0400 | [diff] [blame] | 837 | ret = cmpxchg(ptr, val, (unsigned long)&new->list); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 838 |  | 
|  | 839 | return ret == val; | 
|  | 840 | } | 
|  | 841 |  | 
|  | 842 | /* | 
|  | 843 | * rb_tail_page_update - move the tail page forward | 
|  | 844 | * | 
|  | 845 | * Returns 1 if moved tail page, 0 if someone else did. | 
|  | 846 | */ | 
|  | 847 | static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 848 | struct buffer_page *tail_page, | 
|  | 849 | struct buffer_page *next_page) | 
|  | 850 | { | 
|  | 851 | struct buffer_page *old_tail; | 
|  | 852 | unsigned long old_entries; | 
|  | 853 | unsigned long old_write; | 
|  | 854 | int ret = 0; | 
|  | 855 |  | 
|  | 856 | /* | 
|  | 857 | * The tail page now needs to be moved forward. | 
|  | 858 | * | 
|  | 859 | * We need to reset the tail page, but without messing | 
|  | 860 | * with possible erasing of data brought in by interrupts | 
|  | 861 | * that have moved the tail page and are currently on it. | 
|  | 862 | * | 
|  | 863 | * We add a counter to the write field to denote this. | 
|  | 864 | */ | 
|  | 865 | old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); | 
|  | 866 | old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); | 
|  | 867 |  | 
|  | 868 | /* | 
|  | 869 | * Just make sure we have seen our old_write and synchronize | 
|  | 870 | * with any interrupts that come in. | 
|  | 871 | */ | 
|  | 872 | barrier(); | 
|  | 873 |  | 
|  | 874 | /* | 
|  | 875 | * If the tail page is still the same as what we think | 
|  | 876 | * it is, then it is up to us to update the tail | 
|  | 877 | * pointer. | 
|  | 878 | */ | 
|  | 879 | if (tail_page == cpu_buffer->tail_page) { | 
|  | 880 | /* Zero the write counter */ | 
|  | 881 | unsigned long val = old_write & ~RB_WRITE_MASK; | 
|  | 882 | unsigned long eval = old_entries & ~RB_WRITE_MASK; | 
|  | 883 |  | 
|  | 884 | /* | 
|  | 885 | * This will only succeed if an interrupt did | 
|  | 886 | * not come in and change it. In which case, we | 
|  | 887 | * do not want to modify it. | 
| Lai Jiangshan | da706d8 | 2009-07-15 16:27:30 +0800 | [diff] [blame] | 888 | * | 
|  | 889 | * We add (void) to let the compiler know that we do not care | 
|  | 890 | * about the return value of these functions. We use the | 
|  | 891 | * cmpxchg to only update if an interrupt did not already | 
|  | 892 | * do it for us. If the cmpxchg fails, we don't care. | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 893 | */ | 
| Lai Jiangshan | da706d8 | 2009-07-15 16:27:30 +0800 | [diff] [blame] | 894 | (void)local_cmpxchg(&next_page->write, old_write, val); | 
|  | 895 | (void)local_cmpxchg(&next_page->entries, old_entries, eval); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 896 |  | 
|  | 897 | /* | 
|  | 898 | * No need to worry about races with clearing out the commit. | 
|  | 899 | * it only can increment when a commit takes place. But that | 
|  | 900 | * only happens in the outer most nested commit. | 
|  | 901 | */ | 
|  | 902 | local_set(&next_page->page->commit, 0); | 
|  | 903 |  | 
|  | 904 | old_tail = cmpxchg(&cpu_buffer->tail_page, | 
|  | 905 | tail_page, next_page); | 
|  | 906 |  | 
|  | 907 | if (old_tail == tail_page) | 
|  | 908 | ret = 1; | 
|  | 909 | } | 
|  | 910 |  | 
|  | 911 | return ret; | 
|  | 912 | } | 
|  | 913 |  | 
|  | 914 | static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 915 | struct buffer_page *bpage) | 
|  | 916 | { | 
|  | 917 | unsigned long val = (unsigned long)bpage; | 
|  | 918 |  | 
|  | 919 | if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) | 
|  | 920 | return 1; | 
|  | 921 |  | 
|  | 922 | return 0; | 
|  | 923 | } | 
|  | 924 |  | 
|  | 925 | /** | 
|  | 926 | * rb_check_list - make sure a pointer to a list has the last bits zero | 
|  | 927 | */ | 
|  | 928 | static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 929 | struct list_head *list) | 
|  | 930 | { | 
|  | 931 | if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) | 
|  | 932 | return 1; | 
|  | 933 | if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) | 
|  | 934 | return 1; | 
|  | 935 | return 0; | 
|  | 936 | } | 
|  | 937 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 938 | /** | 
|  | 939 | * check_pages - integrity check of buffer pages | 
|  | 940 | * @cpu_buffer: CPU buffer with pages to test | 
|  | 941 | * | 
| Wenji Huang | c3706f0 | 2009-02-10 01:03:18 -0500 | [diff] [blame] | 942 | * As a safety measure we check to make sure the data pages have not | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 943 | * been corrupted. | 
|  | 944 | */ | 
|  | 945 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 946 | { | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 947 | struct list_head *head = cpu_buffer->pages; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 948 | struct buffer_page *bpage, *tmp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 949 |  | 
| Steven Rostedt | 308f7ee | 2012-05-16 19:46:32 -0400 | [diff] [blame] | 950 | /* Reset the head page if it exists */ | 
|  | 951 | if (cpu_buffer->head_page) | 
|  | 952 | rb_set_head_page(cpu_buffer); | 
|  | 953 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 954 | rb_head_page_deactivate(cpu_buffer); | 
|  | 955 |  | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 956 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) | 
|  | 957 | return -1; | 
|  | 958 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) | 
|  | 959 | return -1; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 960 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 961 | if (rb_check_list(cpu_buffer, head)) | 
|  | 962 | return -1; | 
|  | 963 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 964 | list_for_each_entry_safe(bpage, tmp, head, list) { | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 965 | if (RB_WARN_ON(cpu_buffer, | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 966 | bpage->list.next->prev != &bpage->list)) | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 967 | return -1; | 
|  | 968 | if (RB_WARN_ON(cpu_buffer, | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 969 | bpage->list.prev->next != &bpage->list)) | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 970 | return -1; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 971 | if (rb_check_list(cpu_buffer, &bpage->list)) | 
|  | 972 | return -1; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 973 | } | 
|  | 974 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 975 | rb_head_page_activate(cpu_buffer); | 
|  | 976 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 977 | return 0; | 
|  | 978 | } | 
|  | 979 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 980 | static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 981 | { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 982 | int i; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 983 | struct buffer_page *bpage, *tmp; | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 984 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 985 | for (i = 0; i < nr_pages; i++) { | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 986 | struct page *page; | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 987 | /* | 
|  | 988 | * __GFP_NORETRY flag makes sure that the allocation fails | 
|  | 989 | * gracefully without invoking oom-killer and the system is | 
|  | 990 | * not destabilized. | 
|  | 991 | */ | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 992 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 993 | GFP_KERNEL | __GFP_NORETRY, | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 994 | cpu_to_node(cpu)); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 995 | if (!bpage) | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 996 | goto free_pages; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 997 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 998 | list_add(&bpage->list, pages); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 999 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1000 | page = alloc_pages_node(cpu_to_node(cpu), | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 1001 | GFP_KERNEL | __GFP_NORETRY, 0); | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1002 | if (!page) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1003 | goto free_pages; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1004 | bpage->page = page_address(page); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1005 | rb_init_page(bpage->page); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1006 | } | 
|  | 1007 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1008 | return 0; | 
|  | 1009 |  | 
|  | 1010 | free_pages: | 
|  | 1011 | list_for_each_entry_safe(bpage, tmp, pages, list) { | 
|  | 1012 | list_del_init(&bpage->list); | 
|  | 1013 | free_buffer_page(bpage); | 
|  | 1014 | } | 
|  | 1015 |  | 
|  | 1016 | return -ENOMEM; | 
|  | 1017 | } | 
|  | 1018 |  | 
|  | 1019 | static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 1020 | unsigned nr_pages) | 
|  | 1021 | { | 
|  | 1022 | LIST_HEAD(pages); | 
|  | 1023 |  | 
|  | 1024 | WARN_ON(!nr_pages); | 
|  | 1025 |  | 
|  | 1026 | if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) | 
|  | 1027 | return -ENOMEM; | 
|  | 1028 |  | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1029 | /* | 
|  | 1030 | * The ring buffer page list is a circular list that does not | 
|  | 1031 | * start and end with a list head. All page list items point to | 
|  | 1032 | * other pages. | 
|  | 1033 | */ | 
|  | 1034 | cpu_buffer->pages = pages.next; | 
|  | 1035 | list_del(&pages); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1036 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1037 | cpu_buffer->nr_pages = nr_pages; | 
|  | 1038 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1039 | rb_check_pages(cpu_buffer); | 
|  | 1040 |  | 
|  | 1041 | return 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1042 | } | 
|  | 1043 |  | 
|  | 1044 | static struct ring_buffer_per_cpu * | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1045 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1046 | { | 
|  | 1047 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1048 | struct buffer_page *bpage; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1049 | struct page *page; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1050 | int ret; | 
|  | 1051 |  | 
|  | 1052 | cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), | 
|  | 1053 | GFP_KERNEL, cpu_to_node(cpu)); | 
|  | 1054 | if (!cpu_buffer) | 
|  | 1055 | return NULL; | 
|  | 1056 |  | 
|  | 1057 | cpu_buffer->cpu = cpu; | 
|  | 1058 | cpu_buffer->buffer = buffer; | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1059 | raw_spin_lock_init(&cpu_buffer->reader_lock); | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 1060 | lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); | 
| Thomas Gleixner | edc35bd | 2009-12-03 12:38:57 +0100 | [diff] [blame] | 1061 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1062 | INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1063 | init_completion(&cpu_buffer->update_done); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1064 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1065 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 1066 | GFP_KERNEL, cpu_to_node(cpu)); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1067 | if (!bpage) | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 1068 | goto fail_free_buffer; | 
|  | 1069 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1070 | rb_check_bpage(cpu_buffer, bpage); | 
|  | 1071 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1072 | cpu_buffer->reader_page = bpage; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1073 | page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); | 
|  | 1074 | if (!page) | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 1075 | goto fail_free_reader; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 1076 | bpage->page = page_address(page); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1077 | rb_init_page(bpage->page); | 
| Steven Rostedt | e4c2ce8 | 2008-10-01 11:14:54 -0400 | [diff] [blame] | 1078 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1079 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 
| Vaibhav Nagarnaik | 44b9946 | 2012-06-22 11:50:05 -0700 | [diff] [blame] | 1080 | INIT_LIST_HEAD(&cpu_buffer->new_pages); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1081 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1082 | ret = rb_allocate_pages(cpu_buffer, nr_pages); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1083 | if (ret < 0) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1084 | goto fail_free_reader; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1085 |  | 
|  | 1086 | cpu_buffer->head_page | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1087 | = list_entry(cpu_buffer->pages, struct buffer_page, list); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1088 | cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1089 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1090 | rb_head_page_activate(cpu_buffer); | 
|  | 1091 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1092 | return cpu_buffer; | 
|  | 1093 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1094 | fail_free_reader: | 
|  | 1095 | free_buffer_page(cpu_buffer->reader_page); | 
|  | 1096 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1097 | fail_free_buffer: | 
|  | 1098 | kfree(cpu_buffer); | 
|  | 1099 | return NULL; | 
|  | 1100 | } | 
|  | 1101 |  | 
|  | 1102 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 1103 | { | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1104 | struct list_head *head = cpu_buffer->pages; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1105 | struct buffer_page *bpage, *tmp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1106 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1107 | free_buffer_page(cpu_buffer->reader_page); | 
|  | 1108 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1109 | rb_head_page_deactivate(cpu_buffer); | 
|  | 1110 |  | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1111 | if (head) { | 
|  | 1112 | list_for_each_entry_safe(bpage, tmp, head, list) { | 
|  | 1113 | list_del_init(&bpage->list); | 
|  | 1114 | free_buffer_page(bpage); | 
|  | 1115 | } | 
|  | 1116 | bpage = list_entry(head, struct buffer_page, list); | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1117 | free_buffer_page(bpage); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1118 | } | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 1119 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1120 | kfree(cpu_buffer); | 
|  | 1121 | } | 
|  | 1122 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 1123 | #ifdef CONFIG_HOTPLUG_CPU | 
| Frederic Weisbecker | 09c9e84 | 2009-03-21 04:33:36 +0100 | [diff] [blame] | 1124 | static int rb_cpu_notify(struct notifier_block *self, | 
|  | 1125 | unsigned long action, void *hcpu); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1126 | #endif | 
|  | 1127 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1128 | /** | 
|  | 1129 | * ring_buffer_alloc - allocate a new ring_buffer | 
| Robert Richter | 68814b5 | 2008-11-24 12:24:12 +0100 | [diff] [blame] | 1130 | * @size: the size in bytes per cpu that is needed. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1131 | * @flags: attributes to set for the ring buffer. | 
|  | 1132 | * | 
|  | 1133 | * Currently the only flag that is available is the RB_FL_OVERWRITE | 
|  | 1134 | * flag. This flag means that the buffer will overwrite old data | 
|  | 1135 | * when the buffer wraps. If this flag is not set, the buffer will | 
|  | 1136 | * drop data when the tail hits the head. | 
|  | 1137 | */ | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 1138 | struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, | 
|  | 1139 | struct lock_class_key *key) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1140 | { | 
|  | 1141 | struct ring_buffer *buffer; | 
|  | 1142 | int bsize; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1143 | int cpu, nr_pages; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1144 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1145 | /* keep it in its own cache line */ | 
|  | 1146 | buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), | 
|  | 1147 | GFP_KERNEL); | 
|  | 1148 | if (!buffer) | 
|  | 1149 | return NULL; | 
|  | 1150 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1151 | if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) | 
|  | 1152 | goto fail_free_buffer; | 
|  | 1153 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1154 | nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1155 | buffer->flags = flags; | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 1156 | buffer->clock = trace_clock_local; | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 1157 | buffer->reader_lock_key = key; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1158 |  | 
|  | 1159 | /* need at least two pages */ | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1160 | if (nr_pages < 2) | 
|  | 1161 | nr_pages = 2; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1162 |  | 
| Frederic Weisbecker | 3bf832c | 2009-03-19 14:47:33 +0100 | [diff] [blame] | 1163 | /* | 
|  | 1164 | * In case of non-hotplug cpu, if the ring-buffer is allocated | 
|  | 1165 | * in early initcall, it will not be notified of secondary cpus. | 
|  | 1166 | * In that off case, we need to allocate for all possible cpus. | 
|  | 1167 | */ | 
|  | 1168 | #ifdef CONFIG_HOTPLUG_CPU | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1169 | get_online_cpus(); | 
|  | 1170 | cpumask_copy(buffer->cpumask, cpu_online_mask); | 
| Frederic Weisbecker | 3bf832c | 2009-03-19 14:47:33 +0100 | [diff] [blame] | 1171 | #else | 
|  | 1172 | cpumask_copy(buffer->cpumask, cpu_possible_mask); | 
|  | 1173 | #endif | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1174 | buffer->cpus = nr_cpu_ids; | 
|  | 1175 |  | 
|  | 1176 | bsize = sizeof(void *) * nr_cpu_ids; | 
|  | 1177 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), | 
|  | 1178 | GFP_KERNEL); | 
|  | 1179 | if (!buffer->buffers) | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1180 | goto fail_free_cpumask; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1181 |  | 
|  | 1182 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 1183 | buffer->buffers[cpu] = | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1184 | rb_allocate_cpu_buffer(buffer, nr_pages, cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1185 | if (!buffer->buffers[cpu]) | 
|  | 1186 | goto fail_free_buffers; | 
|  | 1187 | } | 
|  | 1188 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 1189 | #ifdef CONFIG_HOTPLUG_CPU | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1190 | buffer->cpu_notify.notifier_call = rb_cpu_notify; | 
|  | 1191 | buffer->cpu_notify.priority = 0; | 
|  | 1192 | register_cpu_notifier(&buffer->cpu_notify); | 
|  | 1193 | #endif | 
|  | 1194 |  | 
|  | 1195 | put_online_cpus(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1196 | mutex_init(&buffer->mutex); | 
|  | 1197 |  | 
|  | 1198 | return buffer; | 
|  | 1199 |  | 
|  | 1200 | fail_free_buffers: | 
|  | 1201 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 1202 | if (buffer->buffers[cpu]) | 
|  | 1203 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 
|  | 1204 | } | 
|  | 1205 | kfree(buffer->buffers); | 
|  | 1206 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1207 | fail_free_cpumask: | 
|  | 1208 | free_cpumask_var(buffer->cpumask); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1209 | put_online_cpus(); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1210 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1211 | fail_free_buffer: | 
|  | 1212 | kfree(buffer); | 
|  | 1213 | return NULL; | 
|  | 1214 | } | 
| Peter Zijlstra | 1f8a6a1 | 2009-06-08 18:18:39 +0200 | [diff] [blame] | 1215 | EXPORT_SYMBOL_GPL(__ring_buffer_alloc); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1216 |  | 
|  | 1217 | /** | 
|  | 1218 | * ring_buffer_free - free a ring buffer. | 
|  | 1219 | * @buffer: the buffer to free. | 
|  | 1220 | */ | 
|  | 1221 | void | 
|  | 1222 | ring_buffer_free(struct ring_buffer *buffer) | 
|  | 1223 | { | 
|  | 1224 | int cpu; | 
|  | 1225 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1226 | get_online_cpus(); | 
|  | 1227 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 1228 | #ifdef CONFIG_HOTPLUG_CPU | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1229 | unregister_cpu_notifier(&buffer->cpu_notify); | 
|  | 1230 | #endif | 
|  | 1231 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1232 | for_each_buffer_cpu(buffer, cpu) | 
|  | 1233 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 
|  | 1234 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 1235 | put_online_cpus(); | 
|  | 1236 |  | 
| Eric Dumazet | bd3f022 | 2009-08-07 12:49:29 +0200 | [diff] [blame] | 1237 | kfree(buffer->buffers); | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1238 | free_cpumask_var(buffer->cpumask); | 
|  | 1239 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1240 | kfree(buffer); | 
|  | 1241 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1242 | EXPORT_SYMBOL_GPL(ring_buffer_free); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1243 |  | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 1244 | void ring_buffer_set_clock(struct ring_buffer *buffer, | 
|  | 1245 | u64 (*clock)(void)) | 
|  | 1246 | { | 
|  | 1247 | buffer->clock = clock; | 
|  | 1248 | } | 
|  | 1249 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1250 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | 
|  | 1251 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1252 | static inline unsigned long rb_page_entries(struct buffer_page *bpage) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1253 | { | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1254 | return local_read(&bpage->entries) & RB_WRITE_MASK; | 
|  | 1255 | } | 
|  | 1256 |  | 
|  | 1257 | static inline unsigned long rb_page_write(struct buffer_page *bpage) | 
|  | 1258 | { | 
|  | 1259 | return local_read(&bpage->write) & RB_WRITE_MASK; | 
|  | 1260 | } | 
|  | 1261 |  | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1262 | static int | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1263 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) | 
|  | 1264 | { | 
|  | 1265 | struct list_head *tail_page, *to_remove, *next_page; | 
|  | 1266 | struct buffer_page *to_remove_page, *tmp_iter_page; | 
|  | 1267 | struct buffer_page *last_page, *first_page; | 
|  | 1268 | unsigned int nr_removed; | 
|  | 1269 | unsigned long head_bit; | 
|  | 1270 | int page_entries; | 
|  | 1271 |  | 
|  | 1272 | head_bit = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1273 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1274 | raw_spin_lock_irq(&cpu_buffer->reader_lock); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1275 | atomic_inc(&cpu_buffer->record_disabled); | 
|  | 1276 | /* | 
|  | 1277 | * We don't race with the readers since we have acquired the reader | 
|  | 1278 | * lock. We also don't race with writers after disabling recording. | 
|  | 1279 | * This makes it easy to figure out the first and the last page to be | 
|  | 1280 | * removed from the list. We unlink all the pages in between including | 
|  | 1281 | * the first and last pages. This is done in a busy loop so that we | 
|  | 1282 | * lose the least number of traces. | 
|  | 1283 | * The pages are freed after we restart recording and unlock readers. | 
|  | 1284 | */ | 
|  | 1285 | tail_page = &cpu_buffer->tail_page->list; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1286 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1287 | /* | 
|  | 1288 | * tail page might be on reader page, we remove the next page | 
|  | 1289 | * from the ring buffer | 
|  | 1290 | */ | 
|  | 1291 | if (cpu_buffer->tail_page == cpu_buffer->reader_page) | 
|  | 1292 | tail_page = rb_list_head(tail_page->next); | 
|  | 1293 | to_remove = tail_page; | 
|  | 1294 |  | 
|  | 1295 | /* start of pages to remove */ | 
|  | 1296 | first_page = list_entry(rb_list_head(to_remove->next), | 
|  | 1297 | struct buffer_page, list); | 
|  | 1298 |  | 
|  | 1299 | for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { | 
|  | 1300 | to_remove = rb_list_head(to_remove)->next; | 
|  | 1301 | head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1302 | } | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1303 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1304 | next_page = rb_list_head(to_remove)->next; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1305 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1306 | /* | 
|  | 1307 | * Now we remove all pages between tail_page and next_page. | 
|  | 1308 | * Make sure that we have head_bit value preserved for the | 
|  | 1309 | * next page | 
|  | 1310 | */ | 
|  | 1311 | tail_page->next = (struct list_head *)((unsigned long)next_page | | 
|  | 1312 | head_bit); | 
|  | 1313 | next_page = rb_list_head(next_page); | 
|  | 1314 | next_page->prev = tail_page; | 
|  | 1315 |  | 
|  | 1316 | /* make sure pages points to a valid page in the ring buffer */ | 
|  | 1317 | cpu_buffer->pages = next_page; | 
|  | 1318 |  | 
|  | 1319 | /* update head page */ | 
|  | 1320 | if (head_bit) | 
|  | 1321 | cpu_buffer->head_page = list_entry(next_page, | 
|  | 1322 | struct buffer_page, list); | 
|  | 1323 |  | 
|  | 1324 | /* | 
|  | 1325 | * change read pointer to make sure any read iterators reset | 
|  | 1326 | * themselves | 
|  | 1327 | */ | 
|  | 1328 | cpu_buffer->read = 0; | 
|  | 1329 |  | 
|  | 1330 | /* pages are removed, resume tracing and then free the pages */ | 
|  | 1331 | atomic_dec(&cpu_buffer->record_disabled); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1332 | raw_spin_unlock_irq(&cpu_buffer->reader_lock); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1333 |  | 
|  | 1334 | RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); | 
|  | 1335 |  | 
|  | 1336 | /* last buffer page to remove */ | 
|  | 1337 | last_page = list_entry(rb_list_head(to_remove), struct buffer_page, | 
|  | 1338 | list); | 
|  | 1339 | tmp_iter_page = first_page; | 
|  | 1340 |  | 
|  | 1341 | do { | 
|  | 1342 | to_remove_page = tmp_iter_page; | 
|  | 1343 | rb_inc_page(cpu_buffer, &tmp_iter_page); | 
|  | 1344 |  | 
|  | 1345 | /* update the counters */ | 
|  | 1346 | page_entries = rb_page_entries(to_remove_page); | 
|  | 1347 | if (page_entries) { | 
|  | 1348 | /* | 
|  | 1349 | * If something was added to this page, it was full | 
|  | 1350 | * since it is not the tail page. So we deduct the | 
|  | 1351 | * bytes consumed in ring buffer from here. | 
| Vaibhav Nagarnaik | 48fdc72 | 2012-06-29 12:31:41 -0700 | [diff] [blame] | 1352 | * Increment overrun to account for the lost events. | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1353 | */ | 
| Vaibhav Nagarnaik | 48fdc72 | 2012-06-29 12:31:41 -0700 | [diff] [blame] | 1354 | local_add(page_entries, &cpu_buffer->overrun); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1355 | local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); | 
|  | 1356 | } | 
|  | 1357 |  | 
|  | 1358 | /* | 
|  | 1359 | * We have already removed references to this list item, just | 
|  | 1360 | * free up the buffer_page and its page | 
|  | 1361 | */ | 
|  | 1362 | free_buffer_page(to_remove_page); | 
|  | 1363 | nr_removed--; | 
|  | 1364 |  | 
|  | 1365 | } while (to_remove_page != last_page); | 
|  | 1366 |  | 
|  | 1367 | RB_WARN_ON(cpu_buffer, nr_removed); | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1368 |  | 
|  | 1369 | return nr_removed == 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1370 | } | 
|  | 1371 |  | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1372 | static int | 
|  | 1373 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1374 | { | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1375 | struct list_head *pages = &cpu_buffer->new_pages; | 
|  | 1376 | int retries, success; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1377 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1378 | raw_spin_lock_irq(&cpu_buffer->reader_lock); | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1379 | /* | 
|  | 1380 | * We are holding the reader lock, so the reader page won't be swapped | 
|  | 1381 | * in the ring buffer. Now we are racing with the writer trying to | 
|  | 1382 | * move head page and the tail page. | 
|  | 1383 | * We are going to adapt the reader page update process where: | 
|  | 1384 | * 1. We first splice the start and end of list of new pages between | 
|  | 1385 | *    the head page and its previous page. | 
|  | 1386 | * 2. We cmpxchg the prev_page->next to point from head page to the | 
|  | 1387 | *    start of new pages list. | 
|  | 1388 | * 3. Finally, we update the head->prev to the end of new list. | 
|  | 1389 | * | 
|  | 1390 | * We will try this process 10 times, to make sure that we don't keep | 
|  | 1391 | * spinning. | 
|  | 1392 | */ | 
|  | 1393 | retries = 10; | 
|  | 1394 | success = 0; | 
|  | 1395 | while (retries--) { | 
|  | 1396 | struct list_head *head_page, *prev_page, *r; | 
|  | 1397 | struct list_head *last_page, *first_page; | 
|  | 1398 | struct list_head *head_page_with_bit; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1399 |  | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1400 | head_page = &rb_set_head_page(cpu_buffer)->list; | 
| Steven Rostedt | 54f7be5 | 2012-11-29 22:27:22 -0500 | [diff] [blame] | 1401 | if (!head_page) | 
|  | 1402 | break; | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1403 | prev_page = head_page->prev; | 
|  | 1404 |  | 
|  | 1405 | first_page = pages->next; | 
|  | 1406 | last_page  = pages->prev; | 
|  | 1407 |  | 
|  | 1408 | head_page_with_bit = (struct list_head *) | 
|  | 1409 | ((unsigned long)head_page | RB_PAGE_HEAD); | 
|  | 1410 |  | 
|  | 1411 | last_page->next = head_page_with_bit; | 
|  | 1412 | first_page->prev = prev_page; | 
|  | 1413 |  | 
|  | 1414 | r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); | 
|  | 1415 |  | 
|  | 1416 | if (r == head_page_with_bit) { | 
|  | 1417 | /* | 
|  | 1418 | * yay, we replaced the page pointer to our new list, | 
|  | 1419 | * now, we just have to update to head page's prev | 
|  | 1420 | * pointer to point to end of list | 
|  | 1421 | */ | 
|  | 1422 | head_page->prev = last_page; | 
|  | 1423 | success = 1; | 
|  | 1424 | break; | 
|  | 1425 | } | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1426 | } | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1427 |  | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1428 | if (success) | 
|  | 1429 | INIT_LIST_HEAD(pages); | 
|  | 1430 | /* | 
|  | 1431 | * If we weren't successful in adding in new pages, warn and stop | 
|  | 1432 | * tracing | 
|  | 1433 | */ | 
|  | 1434 | RB_WARN_ON(cpu_buffer, !success); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 1435 | raw_spin_unlock_irq(&cpu_buffer->reader_lock); | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1436 |  | 
|  | 1437 | /* free pages if they weren't inserted */ | 
|  | 1438 | if (!success) { | 
|  | 1439 | struct buffer_page *bpage, *tmp; | 
|  | 1440 | list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, | 
|  | 1441 | list) { | 
|  | 1442 | list_del_init(&bpage->list); | 
|  | 1443 | free_buffer_page(bpage); | 
|  | 1444 | } | 
|  | 1445 | } | 
|  | 1446 | return success; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1447 | } | 
|  | 1448 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1449 | static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1450 | { | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1451 | int success; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1452 |  | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 1453 | if (cpu_buffer->nr_pages_to_update > 0) | 
|  | 1454 | success = rb_insert_pages(cpu_buffer); | 
|  | 1455 | else | 
|  | 1456 | success = rb_remove_pages(cpu_buffer, | 
|  | 1457 | -cpu_buffer->nr_pages_to_update); | 
|  | 1458 |  | 
|  | 1459 | if (success) | 
|  | 1460 | cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1461 | } | 
|  | 1462 |  | 
|  | 1463 | static void update_pages_handler(struct work_struct *work) | 
|  | 1464 | { | 
|  | 1465 | struct ring_buffer_per_cpu *cpu_buffer = container_of(work, | 
|  | 1466 | struct ring_buffer_per_cpu, update_pages_work); | 
|  | 1467 | rb_update_pages(cpu_buffer); | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1468 | complete(&cpu_buffer->update_done); | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1469 | } | 
|  | 1470 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1471 | /** | 
|  | 1472 | * ring_buffer_resize - resize the ring buffer | 
|  | 1473 | * @buffer: the buffer to resize. | 
|  | 1474 | * @size: the new size. | 
|  | 1475 | * | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1476 | * Minimum size is 2 * BUF_PAGE_SIZE. | 
|  | 1477 | * | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1478 | * Returns 0 on success and < 0 on failure. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1479 | */ | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1480 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, | 
|  | 1481 | int cpu_id) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1482 | { | 
|  | 1483 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1484 | unsigned nr_pages; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1485 | int cpu, err = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1486 |  | 
| Ingo Molnar | ee51a1d | 2008-11-13 14:58:31 +0100 | [diff] [blame] | 1487 | /* | 
|  | 1488 | * Always succeed at resizing a non-existent buffer: | 
|  | 1489 | */ | 
|  | 1490 | if (!buffer) | 
|  | 1491 | return size; | 
|  | 1492 |  | 
| Steven Rostedt | 6a31e1f | 2012-05-23 15:35:17 -0400 | [diff] [blame] | 1493 | /* Make sure the requested buffer exists */ | 
|  | 1494 | if (cpu_id != RING_BUFFER_ALL_CPUS && | 
|  | 1495 | !cpumask_test_cpu(cpu_id, buffer->cpumask)) | 
|  | 1496 | return size; | 
|  | 1497 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1498 | size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 
|  | 1499 | size *= BUF_PAGE_SIZE; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1500 |  | 
|  | 1501 | /* we need a minimum of two pages */ | 
|  | 1502 | if (size < BUF_PAGE_SIZE * 2) | 
|  | 1503 | size = BUF_PAGE_SIZE * 2; | 
|  | 1504 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1505 | nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 
|  | 1506 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1507 | /* | 
|  | 1508 | * Don't succeed if resizing is disabled, as a reader might be | 
|  | 1509 | * manipulating the ring buffer and is expecting a sane state while | 
|  | 1510 | * this is true. | 
|  | 1511 | */ | 
|  | 1512 | if (atomic_read(&buffer->resize_disabled)) | 
|  | 1513 | return -EBUSY; | 
|  | 1514 |  | 
|  | 1515 | /* prevent another thread from changing buffer sizes */ | 
|  | 1516 | mutex_lock(&buffer->mutex); | 
|  | 1517 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1518 | if (cpu_id == RING_BUFFER_ALL_CPUS) { | 
|  | 1519 | /* calculate the pages to update */ | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1520 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 1521 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1522 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1523 | cpu_buffer->nr_pages_to_update = nr_pages - | 
|  | 1524 | cpu_buffer->nr_pages; | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 1525 | /* | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1526 | * nothing more to do for removing pages or no update | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 1527 | */ | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1528 | if (cpu_buffer->nr_pages_to_update <= 0) | 
|  | 1529 | continue; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1530 | /* | 
|  | 1531 | * to add pages, make sure all new pages can be | 
|  | 1532 | * allocated without receiving ENOMEM | 
|  | 1533 | */ | 
|  | 1534 | INIT_LIST_HEAD(&cpu_buffer->new_pages); | 
|  | 1535 | if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1536 | &cpu_buffer->new_pages, cpu)) { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1537 | /* not enough memory for new pages */ | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1538 | err = -ENOMEM; | 
|  | 1539 | goto out_err; | 
|  | 1540 | } | 
|  | 1541 | } | 
|  | 1542 |  | 
|  | 1543 | get_online_cpus(); | 
|  | 1544 | /* | 
|  | 1545 | * Fire off all the required work handlers | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1546 | * We can't schedule on offline CPUs, but it's not necessary | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1547 | * since we can change their buffer sizes without any race. | 
|  | 1548 | */ | 
|  | 1549 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 1550 | cpu_buffer = buffer->buffers[cpu]; | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1551 | if (!cpu_buffer->nr_pages_to_update) | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1552 | continue; | 
|  | 1553 |  | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1554 | if (cpu_online(cpu)) | 
|  | 1555 | schedule_work_on(cpu, | 
|  | 1556 | &cpu_buffer->update_pages_work); | 
|  | 1557 | else | 
|  | 1558 | rb_update_pages(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1559 | } | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1560 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1561 | /* wait for all the updates to complete */ | 
|  | 1562 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 1563 | cpu_buffer = buffer->buffers[cpu]; | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1564 | if (!cpu_buffer->nr_pages_to_update) | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1565 | continue; | 
|  | 1566 |  | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1567 | if (cpu_online(cpu)) | 
|  | 1568 | wait_for_completion(&cpu_buffer->update_done); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1569 | cpu_buffer->nr_pages_to_update = 0; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1570 | } | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1571 |  | 
|  | 1572 | put_online_cpus(); | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1573 | } else { | 
| Vaibhav Nagarnaik | 8e49f41 | 2012-10-10 16:40:27 -0700 | [diff] [blame] | 1574 | /* Make sure this CPU has been intitialized */ | 
|  | 1575 | if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) | 
|  | 1576 | goto out; | 
|  | 1577 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1578 | cpu_buffer = buffer->buffers[cpu_id]; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1579 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1580 | if (nr_pages == cpu_buffer->nr_pages) | 
|  | 1581 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1582 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1583 | cpu_buffer->nr_pages_to_update = nr_pages - | 
|  | 1584 | cpu_buffer->nr_pages; | 
|  | 1585 |  | 
|  | 1586 | INIT_LIST_HEAD(&cpu_buffer->new_pages); | 
|  | 1587 | if (cpu_buffer->nr_pages_to_update > 0 && | 
|  | 1588 | __rb_allocate_pages(cpu_buffer->nr_pages_to_update, | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1589 | &cpu_buffer->new_pages, cpu_id)) { | 
|  | 1590 | err = -ENOMEM; | 
|  | 1591 | goto out_err; | 
|  | 1592 | } | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1593 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1594 | get_online_cpus(); | 
|  | 1595 |  | 
|  | 1596 | if (cpu_online(cpu_id)) { | 
|  | 1597 | schedule_work_on(cpu_id, | 
|  | 1598 | &cpu_buffer->update_pages_work); | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1599 | wait_for_completion(&cpu_buffer->update_done); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1600 | } else | 
|  | 1601 | rb_update_pages(cpu_buffer); | 
|  | 1602 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1603 | cpu_buffer->nr_pages_to_update = 0; | 
| Vaibhav Nagarnaik | 05fdd70 | 2012-05-18 13:29:51 -0700 | [diff] [blame] | 1604 | put_online_cpus(); | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1605 | } | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1606 |  | 
|  | 1607 | out: | 
| Steven Rostedt | 659f451 | 2012-05-14 17:02:33 -0400 | [diff] [blame] | 1608 | /* | 
|  | 1609 | * The ring buffer resize can happen with the ring buffer | 
|  | 1610 | * enabled, so that the update disturbs the tracing as little | 
|  | 1611 | * as possible. But if the buffer is disabled, we do not need | 
|  | 1612 | * to worry about that, and we can take the time to verify | 
|  | 1613 | * that the buffer is not corrupt. | 
|  | 1614 | */ | 
|  | 1615 | if (atomic_read(&buffer->record_disabled)) { | 
|  | 1616 | atomic_inc(&buffer->record_disabled); | 
|  | 1617 | /* | 
|  | 1618 | * Even though the buffer was disabled, we must make sure | 
|  | 1619 | * that it is truly disabled before calling rb_check_pages. | 
|  | 1620 | * There could have been a race between checking | 
|  | 1621 | * record_disable and incrementing it. | 
|  | 1622 | */ | 
|  | 1623 | synchronize_sched(); | 
|  | 1624 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 1625 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 1626 | rb_check_pages(cpu_buffer); | 
|  | 1627 | } | 
|  | 1628 | atomic_dec(&buffer->record_disabled); | 
|  | 1629 | } | 
|  | 1630 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1631 | mutex_unlock(&buffer->mutex); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1632 | return size; | 
|  | 1633 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1634 | out_err: | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1635 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 1636 | struct buffer_page *bpage, *tmp; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1637 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1638 | cpu_buffer = buffer->buffers[cpu]; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1639 | cpu_buffer->nr_pages_to_update = 0; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1640 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1641 | if (list_empty(&cpu_buffer->new_pages)) | 
|  | 1642 | continue; | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1643 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1644 | list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, | 
|  | 1645 | list) { | 
|  | 1646 | list_del_init(&bpage->list); | 
|  | 1647 | free_buffer_page(bpage); | 
|  | 1648 | } | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1649 | } | 
| Vegard Nossum | 641d2f6 | 2008-11-18 19:22:13 +0100 | [diff] [blame] | 1650 | mutex_unlock(&buffer->mutex); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 1651 | return err; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1652 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1653 | EXPORT_SYMBOL_GPL(ring_buffer_resize); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1654 |  | 
| David Sharp | 750912f | 2010-12-08 13:46:47 -0800 | [diff] [blame] | 1655 | void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) | 
|  | 1656 | { | 
|  | 1657 | mutex_lock(&buffer->mutex); | 
|  | 1658 | if (val) | 
|  | 1659 | buffer->flags |= RB_FL_OVERWRITE; | 
|  | 1660 | else | 
|  | 1661 | buffer->flags &= ~RB_FL_OVERWRITE; | 
|  | 1662 | mutex_unlock(&buffer->mutex); | 
|  | 1663 | } | 
|  | 1664 | EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); | 
|  | 1665 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 1666 | static inline void * | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1667 | __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 1668 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1669 | return bpage->data + index; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 1670 | } | 
|  | 1671 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1672 | static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1673 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 1674 | return bpage->page->data + index; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1675 | } | 
|  | 1676 |  | 
|  | 1677 | static inline struct ring_buffer_event * | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1678 | rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1679 | { | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1680 | return __rb_page_index(cpu_buffer->reader_page, | 
|  | 1681 | cpu_buffer->reader_page->read); | 
|  | 1682 | } | 
|  | 1683 |  | 
|  | 1684 | static inline struct ring_buffer_event * | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1685 | rb_iter_head_event(struct ring_buffer_iter *iter) | 
|  | 1686 | { | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1687 | return __rb_page_index(iter->head_page, iter->head); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1688 | } | 
|  | 1689 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1690 | static inline unsigned rb_page_commit(struct buffer_page *bpage) | 
|  | 1691 | { | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1692 | return local_read(&bpage->page->commit); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1693 | } | 
|  | 1694 |  | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1695 | /* Size is determined by what has been committed */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1696 | static inline unsigned rb_page_size(struct buffer_page *bpage) | 
|  | 1697 | { | 
|  | 1698 | return rb_page_commit(bpage); | 
|  | 1699 | } | 
|  | 1700 |  | 
|  | 1701 | static inline unsigned | 
|  | 1702 | rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 1703 | { | 
|  | 1704 | return rb_page_commit(cpu_buffer->commit_page); | 
|  | 1705 | } | 
|  | 1706 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1707 | static inline unsigned | 
|  | 1708 | rb_event_index(struct ring_buffer_event *event) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1709 | { | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1710 | unsigned long addr = (unsigned long)event; | 
|  | 1711 |  | 
| Steven Rostedt | 22f470f | 2009-06-11 09:29:58 -0400 | [diff] [blame] | 1712 | return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1713 | } | 
|  | 1714 |  | 
| Steven Rostedt | 0f0c85f | 2009-05-11 16:08:00 -0400 | [diff] [blame] | 1715 | static inline int | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 1716 | rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 1717 | struct ring_buffer_event *event) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1718 | { | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1719 | unsigned long addr = (unsigned long)event; | 
|  | 1720 | unsigned long index; | 
|  | 1721 |  | 
|  | 1722 | index = rb_event_index(event); | 
|  | 1723 | addr &= PAGE_MASK; | 
|  | 1724 |  | 
|  | 1725 | return cpu_buffer->commit_page->page == (void *)addr && | 
|  | 1726 | rb_commit_index(cpu_buffer) == index; | 
|  | 1727 | } | 
|  | 1728 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1729 | static void | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1730 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 1731 | { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1732 | unsigned long max_count; | 
|  | 1733 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1734 | /* | 
|  | 1735 | * We only race with interrupts and NMIs on this CPU. | 
|  | 1736 | * If we own the commit event, then we can commit | 
|  | 1737 | * all others that interrupted us, since the interruptions | 
|  | 1738 | * are in stack format (they finish before they come | 
|  | 1739 | * back to us). This allows us to do a simple loop to | 
|  | 1740 | * assign the commit to the tail. | 
|  | 1741 | */ | 
| Steven Rostedt | a8ccf1d | 2008-12-23 11:32:24 -0500 | [diff] [blame] | 1742 | again: | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 1743 | max_count = cpu_buffer->nr_pages * 100; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1744 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1745 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1746 | if (RB_WARN_ON(cpu_buffer, !(--max_count))) | 
|  | 1747 | return; | 
|  | 1748 | if (RB_WARN_ON(cpu_buffer, | 
|  | 1749 | rb_is_reader_page(cpu_buffer->tail_page))) | 
|  | 1750 | return; | 
|  | 1751 | local_set(&cpu_buffer->commit_page->page->commit, | 
|  | 1752 | rb_page_write(cpu_buffer->commit_page)); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1753 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1754 | cpu_buffer->write_stamp = | 
|  | 1755 | cpu_buffer->commit_page->page->time_stamp; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1756 | /* add barrier to keep gcc from optimizing too much */ | 
|  | 1757 | barrier(); | 
|  | 1758 | } | 
|  | 1759 | while (rb_commit_index(cpu_buffer) != | 
|  | 1760 | rb_page_write(cpu_buffer->commit_page)) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1761 |  | 
|  | 1762 | local_set(&cpu_buffer->commit_page->page->commit, | 
|  | 1763 | rb_page_write(cpu_buffer->commit_page)); | 
|  | 1764 | RB_WARN_ON(cpu_buffer, | 
|  | 1765 | local_read(&cpu_buffer->commit_page->page->commit) & | 
|  | 1766 | ~RB_WRITE_MASK); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1767 | barrier(); | 
|  | 1768 | } | 
| Steven Rostedt | a8ccf1d | 2008-12-23 11:32:24 -0500 | [diff] [blame] | 1769 |  | 
|  | 1770 | /* again, keep gcc from optimizing */ | 
|  | 1771 | barrier(); | 
|  | 1772 |  | 
|  | 1773 | /* | 
|  | 1774 | * If an interrupt came in just after the first while loop | 
|  | 1775 | * and pushed the tail page forward, we will be left with | 
|  | 1776 | * a dangling commit that will never go forward. | 
|  | 1777 | */ | 
|  | 1778 | if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) | 
|  | 1779 | goto again; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1780 | } | 
|  | 1781 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1782 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1783 | { | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1784 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1785 | cpu_buffer->reader_page->read = 0; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1786 | } | 
|  | 1787 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1788 | static void rb_inc_iter(struct ring_buffer_iter *iter) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1789 | { | 
|  | 1790 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
|  | 1791 |  | 
|  | 1792 | /* | 
|  | 1793 | * The iterator could be on the reader page (it starts there). | 
|  | 1794 | * But the head could have moved, since the reader was | 
|  | 1795 | * found. Check for this case and assign the iterator | 
|  | 1796 | * to the head page instead of next. | 
|  | 1797 | */ | 
|  | 1798 | if (iter->head_page == cpu_buffer->reader_page) | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1799 | iter->head_page = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1800 | else | 
|  | 1801 | rb_inc_page(cpu_buffer, &iter->head_page); | 
|  | 1802 |  | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1803 | iter->read_stamp = iter->head_page->page->time_stamp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1804 | iter->head = 0; | 
|  | 1805 | } | 
|  | 1806 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1807 | /* Slow path, do not inline */ | 
|  | 1808 | static noinline struct ring_buffer_event * | 
|  | 1809 | rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) | 
|  | 1810 | { | 
|  | 1811 | event->type_len = RINGBUF_TYPE_TIME_EXTEND; | 
|  | 1812 |  | 
|  | 1813 | /* Not the first event on the page? */ | 
|  | 1814 | if (rb_event_index(event)) { | 
|  | 1815 | event->time_delta = delta & TS_MASK; | 
|  | 1816 | event->array[0] = delta >> TS_SHIFT; | 
|  | 1817 | } else { | 
|  | 1818 | /* nope, just zero it */ | 
|  | 1819 | event->time_delta = 0; | 
|  | 1820 | event->array[0] = 0; | 
|  | 1821 | } | 
|  | 1822 |  | 
|  | 1823 | return skip_time_extend(event); | 
|  | 1824 | } | 
|  | 1825 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1826 | /** | 
| David Sharp | 01e3e71 | 2012-06-07 16:46:24 -0700 | [diff] [blame] | 1827 | * rb_update_event - update event type and data | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1828 | * @event: the even to update | 
|  | 1829 | * @type: the type of event | 
|  | 1830 | * @length: the size of the event field in the ring buffer | 
|  | 1831 | * | 
|  | 1832 | * Update the type and data fields of the event. The length | 
|  | 1833 | * is the actual size that is written to the ring buffer, | 
|  | 1834 | * and with this, we can determine what to place into the | 
|  | 1835 | * data field. | 
|  | 1836 | */ | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1837 | static void | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1838 | rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 1839 | struct ring_buffer_event *event, unsigned length, | 
|  | 1840 | int add_timestamp, u64 delta) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1841 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1842 | /* Only a commit updates the timestamp */ | 
|  | 1843 | if (unlikely(!rb_event_is_commit(cpu_buffer, event))) | 
|  | 1844 | delta = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1845 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1846 | /* | 
|  | 1847 | * If we need to add a timestamp, then we | 
|  | 1848 | * add it to the start of the resevered space. | 
|  | 1849 | */ | 
|  | 1850 | if (unlikely(add_timestamp)) { | 
|  | 1851 | event = rb_add_time_stamp(event, delta); | 
|  | 1852 | length -= RB_LEN_TIME_EXTEND; | 
|  | 1853 | delta = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1854 | } | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 1855 |  | 
|  | 1856 | event->time_delta = delta; | 
|  | 1857 | length -= RB_EVNT_HDR_SIZE; | 
|  | 1858 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { | 
|  | 1859 | event->type_len = 0; | 
|  | 1860 | event->array[0] = length; | 
|  | 1861 | } else | 
|  | 1862 | event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1863 | } | 
|  | 1864 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1865 | /* | 
|  | 1866 | * rb_handle_head_page - writer hit the head page | 
|  | 1867 | * | 
|  | 1868 | * Returns: +1 to retry page | 
|  | 1869 | *           0 to continue | 
|  | 1870 | *          -1 on error | 
|  | 1871 | */ | 
|  | 1872 | static int | 
|  | 1873 | rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 1874 | struct buffer_page *tail_page, | 
|  | 1875 | struct buffer_page *next_page) | 
|  | 1876 | { | 
|  | 1877 | struct buffer_page *new_head; | 
|  | 1878 | int entries; | 
|  | 1879 | int type; | 
|  | 1880 | int ret; | 
|  | 1881 |  | 
|  | 1882 | entries = rb_page_entries(next_page); | 
|  | 1883 |  | 
|  | 1884 | /* | 
|  | 1885 | * The hard part is here. We need to move the head | 
|  | 1886 | * forward, and protect against both readers on | 
|  | 1887 | * other CPUs and writers coming in via interrupts. | 
|  | 1888 | */ | 
|  | 1889 | type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, | 
|  | 1890 | RB_PAGE_HEAD); | 
|  | 1891 |  | 
|  | 1892 | /* | 
|  | 1893 | * type can be one of four: | 
|  | 1894 | *  NORMAL - an interrupt already moved it for us | 
|  | 1895 | *  HEAD   - we are the first to get here. | 
|  | 1896 | *  UPDATE - we are the interrupt interrupting | 
|  | 1897 | *           a current move. | 
|  | 1898 | *  MOVED  - a reader on another CPU moved the next | 
|  | 1899 | *           pointer to its reader page. Give up | 
|  | 1900 | *           and try again. | 
|  | 1901 | */ | 
|  | 1902 |  | 
|  | 1903 | switch (type) { | 
|  | 1904 | case RB_PAGE_HEAD: | 
|  | 1905 | /* | 
|  | 1906 | * We changed the head to UPDATE, thus | 
|  | 1907 | * it is our responsibility to update | 
|  | 1908 | * the counters. | 
|  | 1909 | */ | 
|  | 1910 | local_add(entries, &cpu_buffer->overrun); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 1911 | local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 1912 |  | 
|  | 1913 | /* | 
|  | 1914 | * The entries will be zeroed out when we move the | 
|  | 1915 | * tail page. | 
|  | 1916 | */ | 
|  | 1917 |  | 
|  | 1918 | /* still more to do */ | 
|  | 1919 | break; | 
|  | 1920 |  | 
|  | 1921 | case RB_PAGE_UPDATE: | 
|  | 1922 | /* | 
|  | 1923 | * This is an interrupt that interrupt the | 
|  | 1924 | * previous update. Still more to do. | 
|  | 1925 | */ | 
|  | 1926 | break; | 
|  | 1927 | case RB_PAGE_NORMAL: | 
|  | 1928 | /* | 
|  | 1929 | * An interrupt came in before the update | 
|  | 1930 | * and processed this for us. | 
|  | 1931 | * Nothing left to do. | 
|  | 1932 | */ | 
|  | 1933 | return 1; | 
|  | 1934 | case RB_PAGE_MOVED: | 
|  | 1935 | /* | 
|  | 1936 | * The reader is on another CPU and just did | 
|  | 1937 | * a swap with our next_page. | 
|  | 1938 | * Try again. | 
|  | 1939 | */ | 
|  | 1940 | return 1; | 
|  | 1941 | default: | 
|  | 1942 | RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ | 
|  | 1943 | return -1; | 
|  | 1944 | } | 
|  | 1945 |  | 
|  | 1946 | /* | 
|  | 1947 | * Now that we are here, the old head pointer is | 
|  | 1948 | * set to UPDATE. This will keep the reader from | 
|  | 1949 | * swapping the head page with the reader page. | 
|  | 1950 | * The reader (on another CPU) will spin till | 
|  | 1951 | * we are finished. | 
|  | 1952 | * | 
|  | 1953 | * We just need to protect against interrupts | 
|  | 1954 | * doing the job. We will set the next pointer | 
|  | 1955 | * to HEAD. After that, we set the old pointer | 
|  | 1956 | * to NORMAL, but only if it was HEAD before. | 
|  | 1957 | * otherwise we are an interrupt, and only | 
|  | 1958 | * want the outer most commit to reset it. | 
|  | 1959 | */ | 
|  | 1960 | new_head = next_page; | 
|  | 1961 | rb_inc_page(cpu_buffer, &new_head); | 
|  | 1962 |  | 
|  | 1963 | ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, | 
|  | 1964 | RB_PAGE_NORMAL); | 
|  | 1965 |  | 
|  | 1966 | /* | 
|  | 1967 | * Valid returns are: | 
|  | 1968 | *  HEAD   - an interrupt came in and already set it. | 
|  | 1969 | *  NORMAL - One of two things: | 
|  | 1970 | *            1) We really set it. | 
|  | 1971 | *            2) A bunch of interrupts came in and moved | 
|  | 1972 | *               the page forward again. | 
|  | 1973 | */ | 
|  | 1974 | switch (ret) { | 
|  | 1975 | case RB_PAGE_HEAD: | 
|  | 1976 | case RB_PAGE_NORMAL: | 
|  | 1977 | /* OK */ | 
|  | 1978 | break; | 
|  | 1979 | default: | 
|  | 1980 | RB_WARN_ON(cpu_buffer, 1); | 
|  | 1981 | return -1; | 
|  | 1982 | } | 
|  | 1983 |  | 
|  | 1984 | /* | 
|  | 1985 | * It is possible that an interrupt came in, | 
|  | 1986 | * set the head up, then more interrupts came in | 
|  | 1987 | * and moved it again. When we get back here, | 
|  | 1988 | * the page would have been set to NORMAL but we | 
|  | 1989 | * just set it back to HEAD. | 
|  | 1990 | * | 
|  | 1991 | * How do you detect this? Well, if that happened | 
|  | 1992 | * the tail page would have moved. | 
|  | 1993 | */ | 
|  | 1994 | if (ret == RB_PAGE_NORMAL) { | 
|  | 1995 | /* | 
|  | 1996 | * If the tail had moved passed next, then we need | 
|  | 1997 | * to reset the pointer. | 
|  | 1998 | */ | 
|  | 1999 | if (cpu_buffer->tail_page != tail_page && | 
|  | 2000 | cpu_buffer->tail_page != next_page) | 
|  | 2001 | rb_head_page_set_normal(cpu_buffer, new_head, | 
|  | 2002 | next_page, | 
|  | 2003 | RB_PAGE_HEAD); | 
|  | 2004 | } | 
|  | 2005 |  | 
|  | 2006 | /* | 
|  | 2007 | * If this was the outer most commit (the one that | 
|  | 2008 | * changed the original pointer from HEAD to UPDATE), | 
|  | 2009 | * then it is up to us to reset it to NORMAL. | 
|  | 2010 | */ | 
|  | 2011 | if (type == RB_PAGE_HEAD) { | 
|  | 2012 | ret = rb_head_page_set_normal(cpu_buffer, next_page, | 
|  | 2013 | tail_page, | 
|  | 2014 | RB_PAGE_UPDATE); | 
|  | 2015 | if (RB_WARN_ON(cpu_buffer, | 
|  | 2016 | ret != RB_PAGE_UPDATE)) | 
|  | 2017 | return -1; | 
|  | 2018 | } | 
|  | 2019 |  | 
|  | 2020 | return 0; | 
|  | 2021 | } | 
|  | 2022 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 2023 | static unsigned rb_calculate_event_length(unsigned length) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2024 | { | 
|  | 2025 | struct ring_buffer_event event; /* Used only for sizeof array */ | 
|  | 2026 |  | 
|  | 2027 | /* zero length can cause confusions */ | 
|  | 2028 | if (!length) | 
|  | 2029 | length = 1; | 
|  | 2030 |  | 
| Steven Rostedt | 2271048 | 2010-03-18 17:54:19 -0400 | [diff] [blame] | 2031 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2032 | length += sizeof(event.array[0]); | 
|  | 2033 |  | 
|  | 2034 | length += RB_EVNT_HDR_SIZE; | 
| Steven Rostedt | 2271048 | 2010-03-18 17:54:19 -0400 | [diff] [blame] | 2035 | length = ALIGN(length, RB_ARCH_ALIGNMENT); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2036 |  | 
|  | 2037 | return length; | 
|  | 2038 | } | 
|  | 2039 |  | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 2040 | static inline void | 
|  | 2041 | rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 2042 | struct buffer_page *tail_page, | 
|  | 2043 | unsigned long tail, unsigned long length) | 
|  | 2044 | { | 
|  | 2045 | struct ring_buffer_event *event; | 
|  | 2046 |  | 
|  | 2047 | /* | 
|  | 2048 | * Only the event that crossed the page boundary | 
|  | 2049 | * must fill the old tail_page with padding. | 
|  | 2050 | */ | 
|  | 2051 | if (tail >= BUF_PAGE_SIZE) { | 
| Steven Rostedt | b3230c8 | 2010-05-21 11:55:21 -0400 | [diff] [blame] | 2052 | /* | 
|  | 2053 | * If the page was filled, then we still need | 
|  | 2054 | * to update the real_end. Reset it to zero | 
|  | 2055 | * and the reader will ignore it. | 
|  | 2056 | */ | 
|  | 2057 | if (tail == BUF_PAGE_SIZE) | 
|  | 2058 | tail_page->real_end = 0; | 
|  | 2059 |  | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 2060 | local_sub(length, &tail_page->write); | 
|  | 2061 | return; | 
|  | 2062 | } | 
|  | 2063 |  | 
|  | 2064 | event = __rb_page_index(tail_page, tail); | 
| Linus Torvalds | b0b7065 | 2009-06-20 10:56:46 -0700 | [diff] [blame] | 2065 | kmemcheck_annotate_bitfield(event, bitfield); | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 2066 |  | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2067 | /* account for padding bytes */ | 
|  | 2068 | local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); | 
|  | 2069 |  | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 2070 | /* | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 2071 | * Save the original length to the meta data. | 
|  | 2072 | * This will be used by the reader to add lost event | 
|  | 2073 | * counter. | 
|  | 2074 | */ | 
|  | 2075 | tail_page->real_end = tail; | 
|  | 2076 |  | 
|  | 2077 | /* | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 2078 | * If this event is bigger than the minimum size, then | 
|  | 2079 | * we need to be careful that we don't subtract the | 
|  | 2080 | * write counter enough to allow another writer to slip | 
|  | 2081 | * in on this page. | 
|  | 2082 | * We put in a discarded commit instead, to make sure | 
|  | 2083 | * that this space is not used again. | 
|  | 2084 | * | 
|  | 2085 | * If we are less than the minimum size, we don't need to | 
|  | 2086 | * worry about it. | 
|  | 2087 | */ | 
|  | 2088 | if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { | 
|  | 2089 | /* No room for any events */ | 
|  | 2090 |  | 
|  | 2091 | /* Mark the rest of the page with padding */ | 
|  | 2092 | rb_event_set_padding(event); | 
|  | 2093 |  | 
|  | 2094 | /* Set the write back to the previous setting */ | 
|  | 2095 | local_sub(length, &tail_page->write); | 
|  | 2096 | return; | 
|  | 2097 | } | 
|  | 2098 |  | 
|  | 2099 | /* Put in a discarded event */ | 
|  | 2100 | event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; | 
|  | 2101 | event->type_len = RINGBUF_TYPE_PADDING; | 
|  | 2102 | /* time delta must be non zero */ | 
|  | 2103 | event->time_delta = 1; | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 2104 |  | 
|  | 2105 | /* Set write to end of buffer */ | 
|  | 2106 | length = (tail + length) - BUF_PAGE_SIZE; | 
|  | 2107 | local_sub(length, &tail_page->write); | 
|  | 2108 | } | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2109 |  | 
| Steven Rostedt | 747e94a | 2010-10-08 13:51:48 -0400 | [diff] [blame] | 2110 | /* | 
|  | 2111 | * This is the slow path, force gcc not to inline it. | 
|  | 2112 | */ | 
|  | 2113 | static noinline struct ring_buffer_event * | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2114 | rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 2115 | unsigned long length, unsigned long tail, | 
| Steven Rostedt | e8bc43e | 2010-10-20 10:58:02 -0400 | [diff] [blame] | 2116 | struct buffer_page *tail_page, u64 ts) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2117 | { | 
| Steven Rostedt | 5a50e33 | 2009-11-17 08:43:01 -0500 | [diff] [blame] | 2118 | struct buffer_page *commit_page = cpu_buffer->commit_page; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2119 | struct ring_buffer *buffer = cpu_buffer->buffer; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2120 | struct buffer_page *next_page; | 
|  | 2121 | int ret; | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2122 |  | 
|  | 2123 | next_page = tail_page; | 
|  | 2124 |  | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2125 | rb_inc_page(cpu_buffer, &next_page); | 
|  | 2126 |  | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2127 | /* | 
|  | 2128 | * If for some reason, we had an interrupt storm that made | 
|  | 2129 | * it all the way around the buffer, bail, and warn | 
|  | 2130 | * about it. | 
|  | 2131 | */ | 
|  | 2132 | if (unlikely(next_page == commit_page)) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2133 | local_inc(&cpu_buffer->commit_overrun); | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2134 | goto out_reset; | 
|  | 2135 | } | 
|  | 2136 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2137 | /* | 
|  | 2138 | * This is where the fun begins! | 
|  | 2139 | * | 
|  | 2140 | * We are fighting against races between a reader that | 
|  | 2141 | * could be on another CPU trying to swap its reader | 
|  | 2142 | * page with the buffer head. | 
|  | 2143 | * | 
|  | 2144 | * We are also fighting against interrupts coming in and | 
|  | 2145 | * moving the head or tail on us as well. | 
|  | 2146 | * | 
|  | 2147 | * If the next page is the head page then we have filled | 
|  | 2148 | * the buffer, unless the commit page is still on the | 
|  | 2149 | * reader page. | 
|  | 2150 | */ | 
|  | 2151 | if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2152 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2153 | /* | 
|  | 2154 | * If the commit is not on the reader page, then | 
|  | 2155 | * move the header page. | 
|  | 2156 | */ | 
|  | 2157 | if (!rb_is_reader_page(cpu_buffer->commit_page)) { | 
|  | 2158 | /* | 
|  | 2159 | * If we are not in overwrite mode, | 
|  | 2160 | * this is easy, just stop here. | 
|  | 2161 | */ | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 2162 | if (!(buffer->flags & RB_FL_OVERWRITE)) { | 
|  | 2163 | local_inc(&cpu_buffer->dropped_events); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2164 | goto out_reset; | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 2165 | } | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2166 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2167 | ret = rb_handle_head_page(cpu_buffer, | 
|  | 2168 | tail_page, | 
|  | 2169 | next_page); | 
|  | 2170 | if (ret < 0) | 
|  | 2171 | goto out_reset; | 
|  | 2172 | if (ret) | 
|  | 2173 | goto out_again; | 
|  | 2174 | } else { | 
|  | 2175 | /* | 
|  | 2176 | * We need to be careful here too. The | 
|  | 2177 | * commit page could still be on the reader | 
|  | 2178 | * page. We could have a small buffer, and | 
|  | 2179 | * have filled up the buffer with events | 
|  | 2180 | * from interrupts and such, and wrapped. | 
|  | 2181 | * | 
|  | 2182 | * Note, if the tail page is also the on the | 
|  | 2183 | * reader_page, we let it move out. | 
|  | 2184 | */ | 
|  | 2185 | if (unlikely((cpu_buffer->commit_page != | 
|  | 2186 | cpu_buffer->tail_page) && | 
|  | 2187 | (cpu_buffer->commit_page == | 
|  | 2188 | cpu_buffer->reader_page))) { | 
|  | 2189 | local_inc(&cpu_buffer->commit_overrun); | 
|  | 2190 | goto out_reset; | 
|  | 2191 | } | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2192 | } | 
|  | 2193 | } | 
|  | 2194 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2195 | ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); | 
|  | 2196 | if (ret) { | 
|  | 2197 | /* | 
|  | 2198 | * Nested commits always have zero deltas, so | 
|  | 2199 | * just reread the time stamp | 
|  | 2200 | */ | 
| Steven Rostedt | e8bc43e | 2010-10-20 10:58:02 -0400 | [diff] [blame] | 2201 | ts = rb_time_stamp(buffer); | 
|  | 2202 | next_page->page->time_stamp = ts; | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2203 | } | 
|  | 2204 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2205 | out_again: | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2206 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2207 | rb_reset_tail(cpu_buffer, tail_page, tail, length); | 
| Steven Rostedt | aa20ae8 | 2009-05-05 21:16:11 -0400 | [diff] [blame] | 2208 |  | 
|  | 2209 | /* fail and let the caller try again */ | 
|  | 2210 | return ERR_PTR(-EAGAIN); | 
|  | 2211 |  | 
| Steven Rostedt | 45141d4 | 2009-02-12 13:19:48 -0500 | [diff] [blame] | 2212 | out_reset: | 
| Lai Jiangshan | 6f3b344 | 2009-01-12 11:06:18 +0800 | [diff] [blame] | 2213 | /* reset write */ | 
| Steven Rostedt | c7b0930 | 2009-06-11 11:12:00 -0400 | [diff] [blame] | 2214 | rb_reset_tail(cpu_buffer, tail_page, tail, length); | 
| Lai Jiangshan | 6f3b344 | 2009-01-12 11:06:18 +0800 | [diff] [blame] | 2215 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2216 | return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2217 | } | 
|  | 2218 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2219 | static struct ring_buffer_event * | 
|  | 2220 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2221 | unsigned long length, u64 ts, | 
|  | 2222 | u64 delta, int add_timestamp) | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2223 | { | 
| Steven Rostedt | 5a50e33 | 2009-11-17 08:43:01 -0500 | [diff] [blame] | 2224 | struct buffer_page *tail_page; | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2225 | struct ring_buffer_event *event; | 
|  | 2226 | unsigned long tail, write; | 
|  | 2227 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2228 | /* | 
|  | 2229 | * If the time delta since the last event is too big to | 
|  | 2230 | * hold in the time field of the event, then we append a | 
|  | 2231 | * TIME EXTEND event ahead of the data event. | 
|  | 2232 | */ | 
|  | 2233 | if (unlikely(add_timestamp)) | 
|  | 2234 | length += RB_LEN_TIME_EXTEND; | 
|  | 2235 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2236 | tail_page = cpu_buffer->tail_page; | 
|  | 2237 | write = local_add_return(length, &tail_page->write); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2238 |  | 
|  | 2239 | /* set write to only the index of the write */ | 
|  | 2240 | write &= RB_WRITE_MASK; | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2241 | tail = write - length; | 
|  | 2242 |  | 
|  | 2243 | /* See if we shot pass the end of this buffer page */ | 
| Steven Rostedt | 747e94a | 2010-10-08 13:51:48 -0400 | [diff] [blame] | 2244 | if (unlikely(write > BUF_PAGE_SIZE)) | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2245 | return rb_move_tail(cpu_buffer, length, tail, | 
| Steven Rostedt | 5a50e33 | 2009-11-17 08:43:01 -0500 | [diff] [blame] | 2246 | tail_page, ts); | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2247 |  | 
|  | 2248 | /* We reserved something on the buffer */ | 
|  | 2249 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2250 | event = __rb_page_index(tail_page, tail); | 
| Vegard Nossum | 1744a21 | 2009-02-28 08:29:44 +0100 | [diff] [blame] | 2251 | kmemcheck_annotate_bitfield(event, bitfield); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2252 | rb_update_event(cpu_buffer, event, length, add_timestamp, delta); | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2253 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2254 | local_inc(&tail_page->entries); | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2255 |  | 
|  | 2256 | /* | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2257 | * If this is the first commit on the page, then update | 
|  | 2258 | * its timestamp. | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2259 | */ | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2260 | if (!tail) | 
| Steven Rostedt | e8bc43e | 2010-10-20 10:58:02 -0400 | [diff] [blame] | 2261 | tail_page->page->time_stamp = ts; | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2262 |  | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2263 | /* account for these added bytes */ | 
|  | 2264 | local_add(length, &cpu_buffer->entries_bytes); | 
|  | 2265 |  | 
| Steven Rostedt | 6634ff2 | 2009-05-06 15:30:07 -0400 | [diff] [blame] | 2266 | return event; | 
|  | 2267 | } | 
|  | 2268 |  | 
| Steven Rostedt | edd813bf | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2269 | static inline int | 
|  | 2270 | rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 2271 | struct ring_buffer_event *event) | 
|  | 2272 | { | 
|  | 2273 | unsigned long new_index, old_index; | 
|  | 2274 | struct buffer_page *bpage; | 
|  | 2275 | unsigned long index; | 
|  | 2276 | unsigned long addr; | 
|  | 2277 |  | 
|  | 2278 | new_index = rb_event_index(event); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2279 | old_index = new_index + rb_event_ts_length(event); | 
| Steven Rostedt | edd813bf | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2280 | addr = (unsigned long)event; | 
|  | 2281 | addr &= PAGE_MASK; | 
|  | 2282 |  | 
|  | 2283 | bpage = cpu_buffer->tail_page; | 
|  | 2284 |  | 
|  | 2285 | if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2286 | unsigned long write_mask = | 
|  | 2287 | local_read(&bpage->write) & ~RB_WRITE_MASK; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2288 | unsigned long event_length = rb_event_length(event); | 
| Steven Rostedt | edd813bf | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2289 | /* | 
|  | 2290 | * This is on the tail page. It is possible that | 
|  | 2291 | * a write could come in and move the tail page | 
|  | 2292 | * and write to the next page. That is fine | 
|  | 2293 | * because we just shorten what is on this page. | 
|  | 2294 | */ | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2295 | old_index += write_mask; | 
|  | 2296 | new_index += write_mask; | 
| Steven Rostedt | edd813bf | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2297 | index = local_cmpxchg(&bpage->write, old_index, new_index); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2298 | if (index == old_index) { | 
|  | 2299 | /* update counters */ | 
|  | 2300 | local_sub(event_length, &cpu_buffer->entries_bytes); | 
| Steven Rostedt | edd813bf | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2301 | return 1; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2302 | } | 
| Steven Rostedt | edd813bf | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2303 | } | 
|  | 2304 |  | 
|  | 2305 | /* could not discard */ | 
|  | 2306 | return 0; | 
|  | 2307 | } | 
|  | 2308 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2309 | static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 2310 | { | 
|  | 2311 | local_inc(&cpu_buffer->committing); | 
|  | 2312 | local_inc(&cpu_buffer->commits); | 
|  | 2313 | } | 
|  | 2314 |  | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2315 | static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2316 | { | 
|  | 2317 | unsigned long commits; | 
|  | 2318 |  | 
|  | 2319 | if (RB_WARN_ON(cpu_buffer, | 
|  | 2320 | !local_read(&cpu_buffer->committing))) | 
|  | 2321 | return; | 
|  | 2322 |  | 
|  | 2323 | again: | 
|  | 2324 | commits = local_read(&cpu_buffer->commits); | 
|  | 2325 | /* synchronize with interrupts */ | 
|  | 2326 | barrier(); | 
|  | 2327 | if (local_read(&cpu_buffer->committing) == 1) | 
|  | 2328 | rb_set_commit_to_write(cpu_buffer); | 
|  | 2329 |  | 
|  | 2330 | local_dec(&cpu_buffer->committing); | 
|  | 2331 |  | 
|  | 2332 | /* synchronize with interrupts */ | 
|  | 2333 | barrier(); | 
|  | 2334 |  | 
|  | 2335 | /* | 
|  | 2336 | * Need to account for interrupts coming in between the | 
|  | 2337 | * updating of the commit page and the clearing of the | 
|  | 2338 | * committing counter. | 
|  | 2339 | */ | 
|  | 2340 | if (unlikely(local_read(&cpu_buffer->commits) != commits) && | 
|  | 2341 | !local_read(&cpu_buffer->committing)) { | 
|  | 2342 | local_inc(&cpu_buffer->committing); | 
|  | 2343 | goto again; | 
|  | 2344 | } | 
|  | 2345 | } | 
|  | 2346 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2347 | static struct ring_buffer_event * | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2348 | rb_reserve_next_event(struct ring_buffer *buffer, | 
|  | 2349 | struct ring_buffer_per_cpu *cpu_buffer, | 
| Steven Rostedt | 1cd8d73 | 2009-05-11 14:08:09 -0400 | [diff] [blame] | 2350 | unsigned long length) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2351 | { | 
|  | 2352 | struct ring_buffer_event *event; | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2353 | u64 ts, delta; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 2354 | int nr_loops = 0; | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2355 | int add_timestamp; | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2356 | u64 diff; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2357 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2358 | rb_start_commit(cpu_buffer); | 
|  | 2359 |  | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 2360 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2361 | /* | 
|  | 2362 | * Due to the ability to swap a cpu buffer from a buffer | 
|  | 2363 | * it is possible it was swapped before we committed. | 
|  | 2364 | * (committing stops a swap). We check for it here and | 
|  | 2365 | * if it happened, we have to fail the write. | 
|  | 2366 | */ | 
|  | 2367 | barrier(); | 
|  | 2368 | if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { | 
|  | 2369 | local_dec(&cpu_buffer->committing); | 
|  | 2370 | local_dec(&cpu_buffer->commits); | 
|  | 2371 | return NULL; | 
|  | 2372 | } | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 2373 | #endif | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2374 |  | 
| Steven Rostedt | be957c4 | 2009-05-11 14:42:53 -0400 | [diff] [blame] | 2375 | length = rb_calculate_event_length(length); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2376 | again: | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2377 | add_timestamp = 0; | 
|  | 2378 | delta = 0; | 
|  | 2379 |  | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 2380 | /* | 
|  | 2381 | * We allow for interrupts to reenter here and do a trace. | 
|  | 2382 | * If one does, it will cause this original code to loop | 
|  | 2383 | * back here. Even with heavy interrupts happening, this | 
|  | 2384 | * should only happen a few times in a row. If this happens | 
|  | 2385 | * 1000 times in a row, there must be either an interrupt | 
|  | 2386 | * storm or we have something buggy. | 
|  | 2387 | * Bail! | 
|  | 2388 | */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 2389 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2390 | goto out_fail; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 2391 |  | 
| Jiri Olsa | 6d3f1e1 | 2009-10-23 19:36:19 -0400 | [diff] [blame] | 2392 | ts = rb_time_stamp(cpu_buffer->buffer); | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2393 | diff = ts - cpu_buffer->write_stamp; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2394 |  | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2395 | /* make sure this diff is calculated here */ | 
|  | 2396 | barrier(); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2397 |  | 
| Steven Rostedt | 140ff89 | 2010-10-08 10:50:30 -0400 | [diff] [blame] | 2398 | /* Did the write stamp get updated already? */ | 
|  | 2399 | if (likely(ts >= cpu_buffer->write_stamp)) { | 
| Steven Rostedt | 168b6b1 | 2009-05-11 22:11:05 -0400 | [diff] [blame] | 2400 | delta = diff; | 
|  | 2401 | if (unlikely(test_time_stamp(delta))) { | 
| Jiri Olsa | 31274d7 | 2011-02-18 15:52:19 +0100 | [diff] [blame] | 2402 | int local_clock_stable = 1; | 
|  | 2403 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 
|  | 2404 | local_clock_stable = sched_clock_stable; | 
|  | 2405 | #endif | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2406 | WARN_ONCE(delta > (1ULL << 59), | 
| Jiri Olsa | 31274d7 | 2011-02-18 15:52:19 +0100 | [diff] [blame] | 2407 | KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s", | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2408 | (unsigned long long)delta, | 
|  | 2409 | (unsigned long long)ts, | 
| Jiri Olsa | 31274d7 | 2011-02-18 15:52:19 +0100 | [diff] [blame] | 2410 | (unsigned long long)cpu_buffer->write_stamp, | 
|  | 2411 | local_clock_stable ? "" : | 
|  | 2412 | "If you just came from a suspend/resume,\n" | 
|  | 2413 | "please switch to the trace global clock:\n" | 
|  | 2414 | "  echo global > /sys/kernel/debug/tracing/trace_clock\n"); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2415 | add_timestamp = 1; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2416 | } | 
| Steven Rostedt | 168b6b1 | 2009-05-11 22:11:05 -0400 | [diff] [blame] | 2417 | } | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2418 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2419 | event = __rb_reserve_next(cpu_buffer, length, ts, | 
|  | 2420 | delta, add_timestamp); | 
| Steven Rostedt | 168b6b1 | 2009-05-11 22:11:05 -0400 | [diff] [blame] | 2421 | if (unlikely(PTR_ERR(event) == -EAGAIN)) | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2422 | goto again; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2423 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2424 | if (!event) | 
|  | 2425 | goto out_fail; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2426 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2427 | return event; | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2428 |  | 
|  | 2429 | out_fail: | 
|  | 2430 | rb_end_commit(cpu_buffer); | 
|  | 2431 | return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2432 | } | 
|  | 2433 |  | 
| Paul Mundt | 1155de4 | 2009-06-25 14:30:12 +0900 | [diff] [blame] | 2434 | #ifdef CONFIG_TRACING | 
|  | 2435 |  | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2436 | /* | 
|  | 2437 | * The lock and unlock are done within a preempt disable section. | 
|  | 2438 | * The current_context per_cpu variable can only be modified | 
|  | 2439 | * by the current task between lock and unlock. But it can | 
|  | 2440 | * be modified more than once via an interrupt. To pass this | 
|  | 2441 | * information from the lock to the unlock without having to | 
|  | 2442 | * access the 'in_interrupt()' functions again (which do show | 
|  | 2443 | * a bit of overhead in something as critical as function tracing, | 
|  | 2444 | * we use a bitmask trick. | 
|  | 2445 | * | 
|  | 2446 | *  bit 0 =  NMI context | 
|  | 2447 | *  bit 1 =  IRQ context | 
|  | 2448 | *  bit 2 =  SoftIRQ context | 
|  | 2449 | *  bit 3 =  normal context. | 
|  | 2450 | * | 
|  | 2451 | * This works because this is the order of contexts that can | 
|  | 2452 | * preempt other contexts. A SoftIRQ never preempts an IRQ | 
|  | 2453 | * context. | 
|  | 2454 | * | 
|  | 2455 | * When the context is determined, the corresponding bit is | 
|  | 2456 | * checked and set (if it was set, then a recursion of that context | 
|  | 2457 | * happened). | 
|  | 2458 | * | 
|  | 2459 | * On unlock, we need to clear this bit. To do so, just subtract | 
|  | 2460 | * 1 from the current_context and AND it to itself. | 
|  | 2461 | * | 
|  | 2462 | * (binary) | 
|  | 2463 | *  101 - 1 = 100 | 
|  | 2464 | *  101 & 100 = 100 (clearing bit zero) | 
|  | 2465 | * | 
|  | 2466 | *  1010 - 1 = 1001 | 
|  | 2467 | *  1010 & 1001 = 1000 (clearing bit 1) | 
|  | 2468 | * | 
|  | 2469 | * The least significant bit can be cleared this way, and it | 
|  | 2470 | * just so happens that it is the same bit corresponding to | 
|  | 2471 | * the current context. | 
|  | 2472 | */ | 
|  | 2473 | static DEFINE_PER_CPU(unsigned int, current_context); | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2474 |  | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2475 | static __always_inline int trace_recursive_lock(void) | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2476 | { | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2477 | unsigned int val = this_cpu_read(current_context); | 
|  | 2478 | int bit; | 
| Frederic Weisbecker | e057a5e | 2009-04-19 23:38:12 +0200 | [diff] [blame] | 2479 |  | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2480 | if (in_interrupt()) { | 
|  | 2481 | if (in_nmi()) | 
|  | 2482 | bit = 0; | 
|  | 2483 | else if (in_irq()) | 
|  | 2484 | bit = 1; | 
|  | 2485 | else | 
|  | 2486 | bit = 2; | 
|  | 2487 | } else | 
|  | 2488 | bit = 3; | 
| Frederic Weisbecker | e057a5e | 2009-04-19 23:38:12 +0200 | [diff] [blame] | 2489 |  | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2490 | if (unlikely(val & (1 << bit))) | 
|  | 2491 | return 1; | 
|  | 2492 |  | 
|  | 2493 | val |= (1 << bit); | 
|  | 2494 | this_cpu_write(current_context, val); | 
|  | 2495 |  | 
|  | 2496 | return 0; | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2497 | } | 
|  | 2498 |  | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2499 | static __always_inline void trace_recursive_unlock(void) | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2500 | { | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2501 | unsigned int val = this_cpu_read(current_context); | 
| Steven Rostedt | d9abde2 | 2010-10-19 13:17:08 -0400 | [diff] [blame] | 2502 |  | 
| Steven Rostedt | 567cd4d | 2012-11-02 18:33:05 -0400 | [diff] [blame] | 2503 | val--; | 
|  | 2504 | val &= this_cpu_read(current_context); | 
|  | 2505 | this_cpu_write(current_context, val); | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2506 | } | 
|  | 2507 |  | 
| Paul Mundt | 1155de4 | 2009-06-25 14:30:12 +0900 | [diff] [blame] | 2508 | #else | 
|  | 2509 |  | 
|  | 2510 | #define trace_recursive_lock()		(0) | 
|  | 2511 | #define trace_recursive_unlock()	do { } while (0) | 
|  | 2512 |  | 
|  | 2513 | #endif | 
|  | 2514 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2515 | /** | 
|  | 2516 | * ring_buffer_lock_reserve - reserve a part of the buffer | 
|  | 2517 | * @buffer: the ring buffer to reserve from | 
|  | 2518 | * @length: the length of the data to reserve (excluding event header) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2519 | * | 
|  | 2520 | * Returns a reseverd event on the ring buffer to copy directly to. | 
|  | 2521 | * The user of this interface will need to get the body to write into | 
|  | 2522 | * and can use the ring_buffer_event_data() interface. | 
|  | 2523 | * | 
|  | 2524 | * The length is the length of the data needed, not the event length | 
|  | 2525 | * which also includes the event header. | 
|  | 2526 | * | 
|  | 2527 | * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. | 
|  | 2528 | * If NULL is returned, then nothing has been allocated or locked. | 
|  | 2529 | */ | 
|  | 2530 | struct ring_buffer_event * | 
| Arnaldo Carvalho de Melo | 0a98775 | 2009-02-05 16:12:56 -0200 | [diff] [blame] | 2531 | ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2532 | { | 
|  | 2533 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 2534 | struct ring_buffer_event *event; | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2535 | int cpu; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2536 |  | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 2537 | if (ring_buffer_flags != RB_BUFFERS_ON) | 
| Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2538 | return NULL; | 
|  | 2539 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2540 | /* If we are tracing schedule, we don't want to recurse */ | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2541 | preempt_disable_notrace(); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2542 |  | 
| Lai Jiangshan | 52fbe9c | 2010-03-08 14:50:43 +0800 | [diff] [blame] | 2543 | if (atomic_read(&buffer->record_disabled)) | 
|  | 2544 | goto out_nocheck; | 
|  | 2545 |  | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2546 | if (trace_recursive_lock()) | 
|  | 2547 | goto out_nocheck; | 
|  | 2548 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2549 | cpu = raw_smp_processor_id(); | 
|  | 2550 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2551 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2552 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2553 |  | 
|  | 2554 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2555 |  | 
|  | 2556 | if (atomic_read(&cpu_buffer->record_disabled)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2557 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2558 |  | 
| Steven Rostedt | be957c4 | 2009-05-11 14:42:53 -0400 | [diff] [blame] | 2559 | if (length > BUF_MAX_DATA_SIZE) | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2560 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2561 |  | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2562 | event = rb_reserve_next_event(buffer, cpu_buffer, length); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2563 | if (!event) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2564 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2565 |  | 
|  | 2566 | return event; | 
|  | 2567 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2568 | out: | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2569 | trace_recursive_unlock(); | 
|  | 2570 |  | 
|  | 2571 | out_nocheck: | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2572 | preempt_enable_notrace(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2573 | return NULL; | 
|  | 2574 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2575 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2576 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2577 | static void | 
|  | 2578 | rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2579 | struct ring_buffer_event *event) | 
|  | 2580 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2581 | u64 delta; | 
|  | 2582 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2583 | /* | 
|  | 2584 | * The event first in the commit queue updates the | 
|  | 2585 | * time stamp. | 
|  | 2586 | */ | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2587 | if (rb_event_is_commit(cpu_buffer, event)) { | 
|  | 2588 | /* | 
|  | 2589 | * A commit event that is first on a page | 
|  | 2590 | * updates the write timestamp with the page stamp | 
|  | 2591 | */ | 
|  | 2592 | if (!rb_event_index(event)) | 
|  | 2593 | cpu_buffer->write_stamp = | 
|  | 2594 | cpu_buffer->commit_page->page->time_stamp; | 
|  | 2595 | else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { | 
|  | 2596 | delta = event->array[0]; | 
|  | 2597 | delta <<= TS_SHIFT; | 
|  | 2598 | delta += event->time_delta; | 
|  | 2599 | cpu_buffer->write_stamp += delta; | 
|  | 2600 | } else | 
|  | 2601 | cpu_buffer->write_stamp += event->time_delta; | 
|  | 2602 | } | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2603 | } | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2604 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2605 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 2606 | struct ring_buffer_event *event) | 
|  | 2607 | { | 
|  | 2608 | local_inc(&cpu_buffer->entries); | 
|  | 2609 | rb_update_write_stamp(cpu_buffer, event); | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2610 | rb_end_commit(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2611 | } | 
|  | 2612 |  | 
|  | 2613 | /** | 
|  | 2614 | * ring_buffer_unlock_commit - commit a reserved | 
|  | 2615 | * @buffer: The buffer to commit to | 
|  | 2616 | * @event: The event pointer to commit. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2617 | * | 
|  | 2618 | * This commits the data to the ring buffer, and releases any locks held. | 
|  | 2619 | * | 
|  | 2620 | * Must be paired with ring_buffer_lock_reserve. | 
|  | 2621 | */ | 
|  | 2622 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, | 
| Arnaldo Carvalho de Melo | 0a98775 | 2009-02-05 16:12:56 -0200 | [diff] [blame] | 2623 | struct ring_buffer_event *event) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2624 | { | 
|  | 2625 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 2626 | int cpu = raw_smp_processor_id(); | 
|  | 2627 |  | 
|  | 2628 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 2629 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2630 | rb_commit(cpu_buffer, event); | 
|  | 2631 |  | 
| Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 2632 | trace_recursive_unlock(); | 
|  | 2633 |  | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2634 | preempt_enable_notrace(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2635 |  | 
|  | 2636 | return 0; | 
|  | 2637 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2638 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2639 |  | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2640 | static inline void rb_event_discard(struct ring_buffer_event *event) | 
|  | 2641 | { | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 2642 | if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) | 
|  | 2643 | event = skip_time_extend(event); | 
|  | 2644 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 2645 | /* array[0] holds the actual length for the discarded event */ | 
|  | 2646 | event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; | 
|  | 2647 | event->type_len = RINGBUF_TYPE_PADDING; | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2648 | /* time delta must be non zero */ | 
|  | 2649 | if (!event->time_delta) | 
|  | 2650 | event->time_delta = 1; | 
|  | 2651 | } | 
|  | 2652 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2653 | /* | 
|  | 2654 | * Decrement the entries to the page that an event is on. | 
|  | 2655 | * The event does not even need to exist, only the pointer | 
|  | 2656 | * to the page it is on. This may only be called before the commit | 
|  | 2657 | * takes place. | 
|  | 2658 | */ | 
|  | 2659 | static inline void | 
|  | 2660 | rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 2661 | struct ring_buffer_event *event) | 
|  | 2662 | { | 
|  | 2663 | unsigned long addr = (unsigned long)event; | 
|  | 2664 | struct buffer_page *bpage = cpu_buffer->commit_page; | 
|  | 2665 | struct buffer_page *start; | 
|  | 2666 |  | 
|  | 2667 | addr &= PAGE_MASK; | 
|  | 2668 |  | 
|  | 2669 | /* Do the likely case first */ | 
|  | 2670 | if (likely(bpage->page == (void *)addr)) { | 
|  | 2671 | local_dec(&bpage->entries); | 
|  | 2672 | return; | 
|  | 2673 | } | 
|  | 2674 |  | 
|  | 2675 | /* | 
|  | 2676 | * Because the commit page may be on the reader page we | 
|  | 2677 | * start with the next page and check the end loop there. | 
|  | 2678 | */ | 
|  | 2679 | rb_inc_page(cpu_buffer, &bpage); | 
|  | 2680 | start = bpage; | 
|  | 2681 | do { | 
|  | 2682 | if (bpage->page == (void *)addr) { | 
|  | 2683 | local_dec(&bpage->entries); | 
|  | 2684 | return; | 
|  | 2685 | } | 
|  | 2686 | rb_inc_page(cpu_buffer, &bpage); | 
|  | 2687 | } while (bpage != start); | 
|  | 2688 |  | 
|  | 2689 | /* commit not part of this buffer?? */ | 
|  | 2690 | RB_WARN_ON(cpu_buffer, 1); | 
|  | 2691 | } | 
|  | 2692 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2693 | /** | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2694 | * ring_buffer_commit_discard - discard an event that has not been committed | 
|  | 2695 | * @buffer: the ring buffer | 
|  | 2696 | * @event: non committed event to discard | 
|  | 2697 | * | 
| Steven Rostedt | dc892f7 | 2009-09-03 15:33:41 -0400 | [diff] [blame] | 2698 | * Sometimes an event that is in the ring buffer needs to be ignored. | 
|  | 2699 | * This function lets the user discard an event in the ring buffer | 
|  | 2700 | * and then that event will not be read later. | 
|  | 2701 | * | 
|  | 2702 | * This function only works if it is called before the the item has been | 
|  | 2703 | * committed. It will try to free the event from the ring buffer | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2704 | * if another event has not been added behind it. | 
|  | 2705 | * | 
|  | 2706 | * If another event has been added behind it, it will set the event | 
|  | 2707 | * up as discarded, and perform the commit. | 
|  | 2708 | * | 
|  | 2709 | * If this function is called, do not call ring_buffer_unlock_commit on | 
|  | 2710 | * the event. | 
|  | 2711 | */ | 
|  | 2712 | void ring_buffer_discard_commit(struct ring_buffer *buffer, | 
|  | 2713 | struct ring_buffer_event *event) | 
|  | 2714 | { | 
|  | 2715 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2716 | int cpu; | 
|  | 2717 |  | 
|  | 2718 | /* The event is discarded regardless */ | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2719 | rb_event_discard(event); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2720 |  | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2721 | cpu = smp_processor_id(); | 
|  | 2722 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 2723 |  | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2724 | /* | 
|  | 2725 | * This must only be called if the event has not been | 
|  | 2726 | * committed yet. Thus we can assume that preemption | 
|  | 2727 | * is still disabled. | 
|  | 2728 | */ | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2729 | RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2730 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2731 | rb_decrement_entry(cpu_buffer, event); | 
| Steven Rostedt | 0f2541d | 2009-08-05 12:02:48 -0400 | [diff] [blame] | 2732 | if (rb_try_to_discard(cpu_buffer, event)) | 
| Steven Rostedt | edd813bf | 2009-06-02 23:00:53 -0400 | [diff] [blame] | 2733 | goto out; | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2734 |  | 
|  | 2735 | /* | 
|  | 2736 | * The commit is still visible by the reader, so we | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2737 | * must still update the timestamp. | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2738 | */ | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 2739 | rb_update_write_stamp(cpu_buffer, event); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2740 | out: | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 2741 | rb_end_commit(cpu_buffer); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2742 |  | 
| Frederic Weisbecker | f3b9aae | 2009-04-19 23:39:33 +0200 | [diff] [blame] | 2743 | trace_recursive_unlock(); | 
|  | 2744 |  | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2745 | preempt_enable_notrace(); | 
| Steven Rostedt | fa1b47d | 2009-04-02 00:09:41 -0400 | [diff] [blame] | 2746 |  | 
|  | 2747 | } | 
|  | 2748 | EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); | 
|  | 2749 |  | 
|  | 2750 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2751 | * ring_buffer_write - write data to the buffer without reserving | 
|  | 2752 | * @buffer: The ring buffer to write to. | 
|  | 2753 | * @length: The length of the data being written (excluding the event header) | 
|  | 2754 | * @data: The data to write to the buffer. | 
|  | 2755 | * | 
|  | 2756 | * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as | 
|  | 2757 | * one function. If you already have the data to write to the buffer, it | 
|  | 2758 | * may be easier to simply call this function. | 
|  | 2759 | * | 
|  | 2760 | * Note, like ring_buffer_lock_reserve, the length is the length of the data | 
|  | 2761 | * and not the length of the event which would hold the header. | 
|  | 2762 | */ | 
|  | 2763 | int ring_buffer_write(struct ring_buffer *buffer, | 
| David Sharp | 01e3e71 | 2012-06-07 16:46:24 -0700 | [diff] [blame] | 2764 | unsigned long length, | 
|  | 2765 | void *data) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2766 | { | 
|  | 2767 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 2768 | struct ring_buffer_event *event; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2769 | void *body; | 
|  | 2770 | int ret = -EBUSY; | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2771 | int cpu; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2772 |  | 
| Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 2773 | if (ring_buffer_flags != RB_BUFFERS_ON) | 
| Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2774 | return -EBUSY; | 
|  | 2775 |  | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2776 | preempt_disable_notrace(); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2777 |  | 
| Lai Jiangshan | 52fbe9c | 2010-03-08 14:50:43 +0800 | [diff] [blame] | 2778 | if (atomic_read(&buffer->record_disabled)) | 
|  | 2779 | goto out; | 
|  | 2780 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2781 | cpu = raw_smp_processor_id(); | 
|  | 2782 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2783 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2784 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2785 |  | 
|  | 2786 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2787 |  | 
|  | 2788 | if (atomic_read(&cpu_buffer->record_disabled)) | 
|  | 2789 | goto out; | 
|  | 2790 |  | 
| Steven Rostedt | be957c4 | 2009-05-11 14:42:53 -0400 | [diff] [blame] | 2791 | if (length > BUF_MAX_DATA_SIZE) | 
|  | 2792 | goto out; | 
|  | 2793 |  | 
| Steven Rostedt | 62f0b3e | 2009-09-04 14:11:34 -0400 | [diff] [blame] | 2794 | event = rb_reserve_next_event(buffer, cpu_buffer, length); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2795 | if (!event) | 
|  | 2796 | goto out; | 
|  | 2797 |  | 
|  | 2798 | body = rb_event_data(event); | 
|  | 2799 |  | 
|  | 2800 | memcpy(body, data, length); | 
|  | 2801 |  | 
|  | 2802 | rb_commit(cpu_buffer, event); | 
|  | 2803 |  | 
|  | 2804 | ret = 0; | 
|  | 2805 | out: | 
| Steven Rostedt | 5168ae5 | 2010-06-03 09:36:50 -0400 | [diff] [blame] | 2806 | preempt_enable_notrace(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2807 |  | 
|  | 2808 | return ret; | 
|  | 2809 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2810 | EXPORT_SYMBOL_GPL(ring_buffer_write); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2811 |  | 
| Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 2812 | static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2813 | { | 
|  | 2814 | struct buffer_page *reader = cpu_buffer->reader_page; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2815 | struct buffer_page *head = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2816 | struct buffer_page *commit = cpu_buffer->commit_page; | 
|  | 2817 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 2818 | /* In case of error, head will be NULL */ | 
|  | 2819 | if (unlikely(!head)) | 
|  | 2820 | return 1; | 
|  | 2821 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2822 | return reader->read == rb_page_commit(reader) && | 
|  | 2823 | (commit == reader || | 
|  | 2824 | (commit == head && | 
|  | 2825 | head->read == rb_page_commit(commit))); | 
|  | 2826 | } | 
|  | 2827 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2828 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2829 | * ring_buffer_record_disable - stop all writes into the buffer | 
|  | 2830 | * @buffer: The ring buffer to stop writes to. | 
|  | 2831 | * | 
|  | 2832 | * This prevents all writes to the buffer. Any attempt to write | 
|  | 2833 | * to the buffer after this will fail and return NULL. | 
|  | 2834 | * | 
|  | 2835 | * The caller should call synchronize_sched() after this. | 
|  | 2836 | */ | 
|  | 2837 | void ring_buffer_record_disable(struct ring_buffer *buffer) | 
|  | 2838 | { | 
|  | 2839 | atomic_inc(&buffer->record_disabled); | 
|  | 2840 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2841 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2842 |  | 
|  | 2843 | /** | 
|  | 2844 | * ring_buffer_record_enable - enable writes to the buffer | 
|  | 2845 | * @buffer: The ring buffer to enable writes | 
|  | 2846 | * | 
|  | 2847 | * Note, multiple disables will need the same number of enables | 
| Adam Buchbinder | c41b20e | 2009-12-11 16:35:39 -0500 | [diff] [blame] | 2848 | * to truly enable the writing (much like preempt_disable). | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2849 | */ | 
|  | 2850 | void ring_buffer_record_enable(struct ring_buffer *buffer) | 
|  | 2851 | { | 
|  | 2852 | atomic_dec(&buffer->record_disabled); | 
|  | 2853 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2854 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2855 |  | 
|  | 2856 | /** | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 2857 | * ring_buffer_record_off - stop all writes into the buffer | 
|  | 2858 | * @buffer: The ring buffer to stop writes to. | 
|  | 2859 | * | 
|  | 2860 | * This prevents all writes to the buffer. Any attempt to write | 
|  | 2861 | * to the buffer after this will fail and return NULL. | 
|  | 2862 | * | 
|  | 2863 | * This is different than ring_buffer_record_disable() as | 
| Wang Tianhong | 87abb3b | 2012-08-02 14:02:00 +0800 | [diff] [blame] | 2864 | * it works like an on/off switch, where as the disable() version | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 2865 | * must be paired with a enable(). | 
|  | 2866 | */ | 
|  | 2867 | void ring_buffer_record_off(struct ring_buffer *buffer) | 
|  | 2868 | { | 
|  | 2869 | unsigned int rd; | 
|  | 2870 | unsigned int new_rd; | 
|  | 2871 |  | 
|  | 2872 | do { | 
|  | 2873 | rd = atomic_read(&buffer->record_disabled); | 
|  | 2874 | new_rd = rd | RB_BUFFER_OFF; | 
|  | 2875 | } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); | 
|  | 2876 | } | 
|  | 2877 | EXPORT_SYMBOL_GPL(ring_buffer_record_off); | 
|  | 2878 |  | 
|  | 2879 | /** | 
|  | 2880 | * ring_buffer_record_on - restart writes into the buffer | 
|  | 2881 | * @buffer: The ring buffer to start writes to. | 
|  | 2882 | * | 
|  | 2883 | * This enables all writes to the buffer that was disabled by | 
|  | 2884 | * ring_buffer_record_off(). | 
|  | 2885 | * | 
|  | 2886 | * This is different than ring_buffer_record_enable() as | 
| Wang Tianhong | 87abb3b | 2012-08-02 14:02:00 +0800 | [diff] [blame] | 2887 | * it works like an on/off switch, where as the enable() version | 
| Steven Rostedt | 499e547 | 2012-02-22 15:50:28 -0500 | [diff] [blame] | 2888 | * must be paired with a disable(). | 
|  | 2889 | */ | 
|  | 2890 | void ring_buffer_record_on(struct ring_buffer *buffer) | 
|  | 2891 | { | 
|  | 2892 | unsigned int rd; | 
|  | 2893 | unsigned int new_rd; | 
|  | 2894 |  | 
|  | 2895 | do { | 
|  | 2896 | rd = atomic_read(&buffer->record_disabled); | 
|  | 2897 | new_rd = rd & ~RB_BUFFER_OFF; | 
|  | 2898 | } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); | 
|  | 2899 | } | 
|  | 2900 | EXPORT_SYMBOL_GPL(ring_buffer_record_on); | 
|  | 2901 |  | 
|  | 2902 | /** | 
|  | 2903 | * ring_buffer_record_is_on - return true if the ring buffer can write | 
|  | 2904 | * @buffer: The ring buffer to see if write is enabled | 
|  | 2905 | * | 
|  | 2906 | * Returns true if the ring buffer is in a state that it accepts writes. | 
|  | 2907 | */ | 
|  | 2908 | int ring_buffer_record_is_on(struct ring_buffer *buffer) | 
|  | 2909 | { | 
|  | 2910 | return !atomic_read(&buffer->record_disabled); | 
|  | 2911 | } | 
|  | 2912 |  | 
|  | 2913 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2914 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer | 
|  | 2915 | * @buffer: The ring buffer to stop writes to. | 
|  | 2916 | * @cpu: The CPU buffer to stop | 
|  | 2917 | * | 
|  | 2918 | * This prevents all writes to the buffer. Any attempt to write | 
|  | 2919 | * to the buffer after this will fail and return NULL. | 
|  | 2920 | * | 
|  | 2921 | * The caller should call synchronize_sched() after this. | 
|  | 2922 | */ | 
|  | 2923 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 2924 | { | 
|  | 2925 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 2926 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2927 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 2928 | return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2929 |  | 
|  | 2930 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 2931 | atomic_inc(&cpu_buffer->record_disabled); | 
|  | 2932 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2933 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2934 |  | 
|  | 2935 | /** | 
|  | 2936 | * ring_buffer_record_enable_cpu - enable writes to the buffer | 
|  | 2937 | * @buffer: The ring buffer to enable writes | 
|  | 2938 | * @cpu: The CPU to enable. | 
|  | 2939 | * | 
|  | 2940 | * Note, multiple disables will need the same number of enables | 
| Adam Buchbinder | c41b20e | 2009-12-11 16:35:39 -0500 | [diff] [blame] | 2941 | * to truly enable the writing (much like preempt_disable). | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2942 | */ | 
|  | 2943 | void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 2944 | { | 
|  | 2945 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 2946 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2947 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 2948 | return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2949 |  | 
|  | 2950 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 2951 | atomic_dec(&cpu_buffer->record_disabled); | 
|  | 2952 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2953 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2954 |  | 
| Steven Rostedt | f6195aa | 2010-09-01 12:23:12 -0400 | [diff] [blame] | 2955 | /* | 
|  | 2956 | * The total entries in the ring buffer is the running counter | 
|  | 2957 | * of entries entered into the ring buffer, minus the sum of | 
|  | 2958 | * the entries read from the ring buffer and the number of | 
|  | 2959 | * entries that were overwritten. | 
|  | 2960 | */ | 
|  | 2961 | static inline unsigned long | 
|  | 2962 | rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 2963 | { | 
|  | 2964 | return local_read(&cpu_buffer->entries) - | 
|  | 2965 | (local_read(&cpu_buffer->overrun) + cpu_buffer->read); | 
|  | 2966 | } | 
|  | 2967 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2968 | /** | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2969 | * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer | 
|  | 2970 | * @buffer: The ring buffer | 
|  | 2971 | * @cpu: The per CPU buffer to read from. | 
|  | 2972 | */ | 
| Yoshihiro YUNOMAE | 50ecf2c | 2012-10-11 16:27:54 -0700 | [diff] [blame] | 2973 | u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2974 | { | 
|  | 2975 | unsigned long flags; | 
|  | 2976 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 2977 | struct buffer_page *bpage; | 
| Linus Torvalds | da830e5 | 2012-12-11 18:18:58 -0800 | [diff] [blame] | 2978 | u64 ret = 0; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2979 |  | 
|  | 2980 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 2981 | return 0; | 
|  | 2982 |  | 
|  | 2983 | cpu_buffer = buffer->buffers[cpu]; | 
| Linus Torvalds | 7115e3f | 2011-10-26 17:03:38 +0200 | [diff] [blame] | 2984 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2985 | /* | 
|  | 2986 | * if the tail is on reader_page, oldest time stamp is on the reader | 
|  | 2987 | * page | 
|  | 2988 | */ | 
|  | 2989 | if (cpu_buffer->tail_page == cpu_buffer->reader_page) | 
|  | 2990 | bpage = cpu_buffer->reader_page; | 
|  | 2991 | else | 
|  | 2992 | bpage = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | 54f7be5 | 2012-11-29 22:27:22 -0500 | [diff] [blame] | 2993 | if (bpage) | 
|  | 2994 | ret = bpage->page->time_stamp; | 
| Linus Torvalds | 7115e3f | 2011-10-26 17:03:38 +0200 | [diff] [blame] | 2995 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 2996 |  | 
|  | 2997 | return ret; | 
|  | 2998 | } | 
|  | 2999 | EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); | 
|  | 3000 |  | 
|  | 3001 | /** | 
|  | 3002 | * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer | 
|  | 3003 | * @buffer: The ring buffer | 
|  | 3004 | * @cpu: The per CPU buffer to read from. | 
|  | 3005 | */ | 
|  | 3006 | unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 3007 | { | 
|  | 3008 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3009 | unsigned long ret; | 
|  | 3010 |  | 
|  | 3011 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 3012 | return 0; | 
|  | 3013 |  | 
|  | 3014 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 3015 | ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; | 
|  | 3016 |  | 
|  | 3017 | return ret; | 
|  | 3018 | } | 
|  | 3019 | EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); | 
|  | 3020 |  | 
|  | 3021 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3022 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer | 
|  | 3023 | * @buffer: The ring buffer | 
|  | 3024 | * @cpu: The per CPU buffer to get the entries from. | 
|  | 3025 | */ | 
|  | 3026 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 3027 | { | 
|  | 3028 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3029 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3030 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3031 | return 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3032 |  | 
|  | 3033 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3034 |  | 
| Steven Rostedt | f6195aa | 2010-09-01 12:23:12 -0400 | [diff] [blame] | 3035 | return rb_num_of_entries(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3036 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3037 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3038 |  | 
|  | 3039 | /** | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 3040 | * ring_buffer_overrun_cpu - get the number of overruns caused by the ring | 
|  | 3041 | * buffer wrapping around (only if RB_FL_OVERWRITE is on). | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3042 | * @buffer: The ring buffer | 
|  | 3043 | * @cpu: The per CPU buffer to get the number of overruns from | 
|  | 3044 | */ | 
|  | 3045 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 3046 | { | 
|  | 3047 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3048 | unsigned long ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3049 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3050 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3051 | return 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3052 |  | 
|  | 3053 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3054 | ret = local_read(&cpu_buffer->overrun); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3055 |  | 
|  | 3056 | return ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3057 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3058 | EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3059 |  | 
|  | 3060 | /** | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 3061 | * ring_buffer_commit_overrun_cpu - get the number of overruns caused by | 
|  | 3062 | * commits failing due to the buffer wrapping around while there are uncommitted | 
|  | 3063 | * events, such as during an interrupt storm. | 
| Steven Rostedt | f0d2c68 | 2009-04-29 13:43:37 -0400 | [diff] [blame] | 3064 | * @buffer: The ring buffer | 
|  | 3065 | * @cpu: The per CPU buffer to get the number of overruns from | 
|  | 3066 | */ | 
|  | 3067 | unsigned long | 
|  | 3068 | ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 3069 | { | 
|  | 3070 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3071 | unsigned long ret; | 
|  | 3072 |  | 
|  | 3073 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 3074 | return 0; | 
|  | 3075 |  | 
|  | 3076 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3077 | ret = local_read(&cpu_buffer->commit_overrun); | 
| Steven Rostedt | f0d2c68 | 2009-04-29 13:43:37 -0400 | [diff] [blame] | 3078 |  | 
|  | 3079 | return ret; | 
|  | 3080 | } | 
|  | 3081 | EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); | 
|  | 3082 |  | 
|  | 3083 | /** | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 3084 | * ring_buffer_dropped_events_cpu - get the number of dropped events caused by | 
|  | 3085 | * the ring buffer filling up (only if RB_FL_OVERWRITE is off). | 
|  | 3086 | * @buffer: The ring buffer | 
|  | 3087 | * @cpu: The per CPU buffer to get the number of overruns from | 
|  | 3088 | */ | 
|  | 3089 | unsigned long | 
|  | 3090 | ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 3091 | { | 
|  | 3092 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3093 | unsigned long ret; | 
|  | 3094 |  | 
|  | 3095 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 3096 | return 0; | 
|  | 3097 |  | 
|  | 3098 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 3099 | ret = local_read(&cpu_buffer->dropped_events); | 
|  | 3100 |  | 
|  | 3101 | return ret; | 
|  | 3102 | } | 
|  | 3103 | EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); | 
|  | 3104 |  | 
|  | 3105 | /** | 
| Steven Rostedt (Red Hat) | ad96470 | 2013-01-29 17:45:49 -0500 | [diff] [blame] | 3106 | * ring_buffer_read_events_cpu - get the number of events successfully read | 
|  | 3107 | * @buffer: The ring buffer | 
|  | 3108 | * @cpu: The per CPU buffer to get the number of events read | 
|  | 3109 | */ | 
|  | 3110 | unsigned long | 
|  | 3111 | ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 3112 | { | 
|  | 3113 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3114 |  | 
|  | 3115 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 3116 | return 0; | 
|  | 3117 |  | 
|  | 3118 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 3119 | return cpu_buffer->read; | 
|  | 3120 | } | 
|  | 3121 | EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); | 
|  | 3122 |  | 
|  | 3123 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3124 | * ring_buffer_entries - get the number of entries in a buffer | 
|  | 3125 | * @buffer: The ring buffer | 
|  | 3126 | * | 
|  | 3127 | * Returns the total number of entries in the ring buffer | 
|  | 3128 | * (all CPU entries) | 
|  | 3129 | */ | 
|  | 3130 | unsigned long ring_buffer_entries(struct ring_buffer *buffer) | 
|  | 3131 | { | 
|  | 3132 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3133 | unsigned long entries = 0; | 
|  | 3134 | int cpu; | 
|  | 3135 |  | 
|  | 3136 | /* if you care about this being correct, lock the buffer */ | 
|  | 3137 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 3138 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | f6195aa | 2010-09-01 12:23:12 -0400 | [diff] [blame] | 3139 | entries += rb_num_of_entries(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3140 | } | 
|  | 3141 |  | 
|  | 3142 | return entries; | 
|  | 3143 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3144 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3145 |  | 
|  | 3146 | /** | 
| Jiri Olsa | 67b394f | 2009-10-23 19:36:18 -0400 | [diff] [blame] | 3147 | * ring_buffer_overruns - get the number of overruns in buffer | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3148 | * @buffer: The ring buffer | 
|  | 3149 | * | 
|  | 3150 | * Returns the total number of overruns in the ring buffer | 
|  | 3151 | * (all CPU entries) | 
|  | 3152 | */ | 
|  | 3153 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | 
|  | 3154 | { | 
|  | 3155 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3156 | unsigned long overruns = 0; | 
|  | 3157 | int cpu; | 
|  | 3158 |  | 
|  | 3159 | /* if you care about this being correct, lock the buffer */ | 
|  | 3160 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 3161 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3162 | overruns += local_read(&cpu_buffer->overrun); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3163 | } | 
|  | 3164 |  | 
|  | 3165 | return overruns; | 
|  | 3166 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3167 | EXPORT_SYMBOL_GPL(ring_buffer_overruns); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3168 |  | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 3169 | static void rb_iter_reset(struct ring_buffer_iter *iter) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3170 | { | 
|  | 3171 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
|  | 3172 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3173 | /* Iterator usage is expected to have record disabled */ | 
|  | 3174 | if (list_empty(&cpu_buffer->reader_page->list)) { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3175 | iter->head_page = rb_set_head_page(cpu_buffer); | 
|  | 3176 | if (unlikely(!iter->head_page)) | 
|  | 3177 | return; | 
|  | 3178 | iter->head = iter->head_page->read; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3179 | } else { | 
|  | 3180 | iter->head_page = cpu_buffer->reader_page; | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 3181 | iter->head = cpu_buffer->reader_page->read; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3182 | } | 
|  | 3183 | if (iter->head) | 
|  | 3184 | iter->read_stamp = cpu_buffer->read_stamp; | 
|  | 3185 | else | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 3186 | iter->read_stamp = iter->head_page->page->time_stamp; | 
| Steven Rostedt | 492a74f | 2010-01-25 15:17:47 -0500 | [diff] [blame] | 3187 | iter->cache_reader_page = cpu_buffer->reader_page; | 
|  | 3188 | iter->cache_read = cpu_buffer->read; | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 3189 | } | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3190 |  | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 3191 | /** | 
|  | 3192 | * ring_buffer_iter_reset - reset an iterator | 
|  | 3193 | * @iter: The iterator to reset | 
|  | 3194 | * | 
|  | 3195 | * Resets the iterator, so that it will start from the beginning | 
|  | 3196 | * again. | 
|  | 3197 | */ | 
|  | 3198 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | 
|  | 3199 | { | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3200 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 3201 | unsigned long flags; | 
|  | 3202 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3203 | if (!iter) | 
|  | 3204 | return; | 
|  | 3205 |  | 
|  | 3206 | cpu_buffer = iter->cpu_buffer; | 
|  | 3207 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3208 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 3209 | rb_iter_reset(iter); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3210 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3211 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3212 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3213 |  | 
|  | 3214 | /** | 
|  | 3215 | * ring_buffer_iter_empty - check if an iterator has no more to read | 
|  | 3216 | * @iter: The iterator to check | 
|  | 3217 | */ | 
|  | 3218 | int ring_buffer_iter_empty(struct ring_buffer_iter *iter) | 
|  | 3219 | { | 
|  | 3220 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3221 |  | 
|  | 3222 | cpu_buffer = iter->cpu_buffer; | 
|  | 3223 |  | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3224 | return iter->head_page == cpu_buffer->commit_page && | 
|  | 3225 | iter->head == rb_commit_index(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3226 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3227 | EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3228 |  | 
|  | 3229 | static void | 
|  | 3230 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 
|  | 3231 | struct ring_buffer_event *event) | 
|  | 3232 | { | 
|  | 3233 | u64 delta; | 
|  | 3234 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 3235 | switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3236 | case RINGBUF_TYPE_PADDING: | 
|  | 3237 | return; | 
|  | 3238 |  | 
|  | 3239 | case RINGBUF_TYPE_TIME_EXTEND: | 
|  | 3240 | delta = event->array[0]; | 
|  | 3241 | delta <<= TS_SHIFT; | 
|  | 3242 | delta += event->time_delta; | 
|  | 3243 | cpu_buffer->read_stamp += delta; | 
|  | 3244 | return; | 
|  | 3245 |  | 
|  | 3246 | case RINGBUF_TYPE_TIME_STAMP: | 
|  | 3247 | /* FIXME: not implemented */ | 
|  | 3248 | return; | 
|  | 3249 |  | 
|  | 3250 | case RINGBUF_TYPE_DATA: | 
|  | 3251 | cpu_buffer->read_stamp += event->time_delta; | 
|  | 3252 | return; | 
|  | 3253 |  | 
|  | 3254 | default: | 
|  | 3255 | BUG(); | 
|  | 3256 | } | 
|  | 3257 | return; | 
|  | 3258 | } | 
|  | 3259 |  | 
|  | 3260 | static void | 
|  | 3261 | rb_update_iter_read_stamp(struct ring_buffer_iter *iter, | 
|  | 3262 | struct ring_buffer_event *event) | 
|  | 3263 | { | 
|  | 3264 | u64 delta; | 
|  | 3265 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 3266 | switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3267 | case RINGBUF_TYPE_PADDING: | 
|  | 3268 | return; | 
|  | 3269 |  | 
|  | 3270 | case RINGBUF_TYPE_TIME_EXTEND: | 
|  | 3271 | delta = event->array[0]; | 
|  | 3272 | delta <<= TS_SHIFT; | 
|  | 3273 | delta += event->time_delta; | 
|  | 3274 | iter->read_stamp += delta; | 
|  | 3275 | return; | 
|  | 3276 |  | 
|  | 3277 | case RINGBUF_TYPE_TIME_STAMP: | 
|  | 3278 | /* FIXME: not implemented */ | 
|  | 3279 | return; | 
|  | 3280 |  | 
|  | 3281 | case RINGBUF_TYPE_DATA: | 
|  | 3282 | iter->read_stamp += event->time_delta; | 
|  | 3283 | return; | 
|  | 3284 |  | 
|  | 3285 | default: | 
|  | 3286 | BUG(); | 
|  | 3287 | } | 
|  | 3288 | return; | 
|  | 3289 | } | 
|  | 3290 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3291 | static struct buffer_page * | 
|  | 3292 | rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3293 | { | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3294 | struct buffer_page *reader = NULL; | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3295 | unsigned long overwrite; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3296 | unsigned long flags; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3297 | int nr_loops = 0; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3298 | int ret; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3299 |  | 
| Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 3300 | local_irq_save(flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3301 | arch_spin_lock(&cpu_buffer->lock); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3302 |  | 
|  | 3303 | again: | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3304 | /* | 
|  | 3305 | * This should normally only loop twice. But because the | 
|  | 3306 | * start of the reader inserts an empty page, it causes | 
|  | 3307 | * a case where we will loop three times. There should be no | 
|  | 3308 | * reason to loop four times (that I know of). | 
|  | 3309 | */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3310 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3311 | reader = NULL; | 
|  | 3312 | goto out; | 
|  | 3313 | } | 
|  | 3314 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3315 | reader = cpu_buffer->reader_page; | 
|  | 3316 |  | 
|  | 3317 | /* If there's more to read, return this page */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3318 | if (cpu_buffer->reader_page->read < rb_page_size(reader)) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3319 | goto out; | 
|  | 3320 |  | 
|  | 3321 | /* Never should we have an index greater than the size */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3322 | if (RB_WARN_ON(cpu_buffer, | 
|  | 3323 | cpu_buffer->reader_page->read > rb_page_size(reader))) | 
|  | 3324 | goto out; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3325 |  | 
|  | 3326 | /* check if we caught up to the tail */ | 
|  | 3327 | reader = NULL; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3328 | if (cpu_buffer->commit_page == cpu_buffer->reader_page) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3329 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3330 |  | 
| Steven Rostedt | a5fb833 | 2012-06-28 13:35:04 -0400 | [diff] [blame] | 3331 | /* Don't bother swapping if the ring buffer is empty */ | 
|  | 3332 | if (rb_num_of_entries(cpu_buffer) == 0) | 
|  | 3333 | goto out; | 
|  | 3334 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3335 | /* | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3336 | * Reset the reader page to size zero. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3337 | */ | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3338 | local_set(&cpu_buffer->reader_page->write, 0); | 
|  | 3339 | local_set(&cpu_buffer->reader_page->entries, 0); | 
|  | 3340 | local_set(&cpu_buffer->reader_page->page->commit, 0); | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 3341 | cpu_buffer->reader_page->real_end = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3342 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3343 | spin: | 
|  | 3344 | /* | 
|  | 3345 | * Splice the empty reader page into the list around the head. | 
|  | 3346 | */ | 
|  | 3347 | reader = rb_set_head_page(cpu_buffer); | 
| Steven Rostedt | 54f7be5 | 2012-11-29 22:27:22 -0500 | [diff] [blame] | 3348 | if (!reader) | 
|  | 3349 | goto out; | 
| Steven Rostedt | 0e1ff5d | 2010-01-06 20:40:44 -0500 | [diff] [blame] | 3350 | cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3351 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3352 |  | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 3353 | /* | 
|  | 3354 | * cpu_buffer->pages just needs to point to the buffer, it | 
|  | 3355 | *  has no specific buffer page to point to. Lets move it out | 
| Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 3356 | *  of our way so we don't accidentally swap it. | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 3357 | */ | 
|  | 3358 | cpu_buffer->pages = reader->list.prev; | 
|  | 3359 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3360 | /* The reader page will be pointing to the new head */ | 
|  | 3361 | rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3362 |  | 
|  | 3363 | /* | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3364 | * We want to make sure we read the overruns after we set up our | 
|  | 3365 | * pointers to the next object. The writer side does a | 
|  | 3366 | * cmpxchg to cross pages which acts as the mb on the writer | 
|  | 3367 | * side. Note, the reader will constantly fail the swap | 
|  | 3368 | * while the writer is updating the pointers, so this | 
|  | 3369 | * guarantees that the overwrite recorded here is the one we | 
|  | 3370 | * want to compare with the last_overrun. | 
|  | 3371 | */ | 
|  | 3372 | smp_mb(); | 
|  | 3373 | overwrite = local_read(&(cpu_buffer->overrun)); | 
|  | 3374 |  | 
|  | 3375 | /* | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3376 | * Here's the tricky part. | 
|  | 3377 | * | 
|  | 3378 | * We need to move the pointer past the header page. | 
|  | 3379 | * But we can only do that if a writer is not currently | 
|  | 3380 | * moving it. The page before the header page has the | 
|  | 3381 | * flag bit '1' set if it is pointing to the page we want. | 
|  | 3382 | * but if the writer is in the process of moving it | 
|  | 3383 | * than it will be '2' or already moved '0'. | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3384 | */ | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3385 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3386 | ret = rb_head_page_replace(reader, cpu_buffer->reader_page); | 
|  | 3387 |  | 
|  | 3388 | /* | 
|  | 3389 | * If we did not convert it, then we must try again. | 
|  | 3390 | */ | 
|  | 3391 | if (!ret) | 
|  | 3392 | goto spin; | 
|  | 3393 |  | 
|  | 3394 | /* | 
|  | 3395 | * Yeah! We succeeded in replacing the page. | 
|  | 3396 | * | 
|  | 3397 | * Now make the new head point back to the reader page. | 
|  | 3398 | */ | 
| David Sharp | 5ded3dc | 2010-01-06 17:12:07 -0800 | [diff] [blame] | 3399 | rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3400 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3401 |  | 
|  | 3402 | /* Finally update the reader page to the new head */ | 
|  | 3403 | cpu_buffer->reader_page = reader; | 
|  | 3404 | rb_reset_reader_page(cpu_buffer); | 
|  | 3405 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3406 | if (overwrite != cpu_buffer->last_overrun) { | 
|  | 3407 | cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; | 
|  | 3408 | cpu_buffer->last_overrun = overwrite; | 
|  | 3409 | } | 
|  | 3410 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3411 | goto again; | 
|  | 3412 |  | 
|  | 3413 | out: | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3414 | arch_spin_unlock(&cpu_buffer->lock); | 
| Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 3415 | local_irq_restore(flags); | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3416 |  | 
|  | 3417 | return reader; | 
|  | 3418 | } | 
|  | 3419 |  | 
|  | 3420 | static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 3421 | { | 
|  | 3422 | struct ring_buffer_event *event; | 
|  | 3423 | struct buffer_page *reader; | 
|  | 3424 | unsigned length; | 
|  | 3425 |  | 
|  | 3426 | reader = rb_get_reader_page(cpu_buffer); | 
|  | 3427 |  | 
|  | 3428 | /* This function should not be called when buffer is empty */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3429 | if (RB_WARN_ON(cpu_buffer, !reader)) | 
|  | 3430 | return; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3431 |  | 
|  | 3432 | event = rb_reader_event(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3433 |  | 
| Steven Rostedt | a1863c2 | 2009-09-03 10:23:58 -0400 | [diff] [blame] | 3434 | if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 
| Steven Rostedt | e4906ef | 2009-04-30 20:49:44 -0400 | [diff] [blame] | 3435 | cpu_buffer->read++; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3436 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3437 | rb_update_read_stamp(cpu_buffer, event); | 
|  | 3438 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3439 | length = rb_event_length(event); | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 3440 | cpu_buffer->reader_page->read += length; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3441 | } | 
|  | 3442 |  | 
|  | 3443 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 
|  | 3444 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3445 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3446 | struct ring_buffer_event *event; | 
|  | 3447 | unsigned length; | 
|  | 3448 |  | 
|  | 3449 | cpu_buffer = iter->cpu_buffer; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3450 |  | 
|  | 3451 | /* | 
|  | 3452 | * Check if we are at the end of the buffer. | 
|  | 3453 | */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3454 | if (iter->head >= rb_page_size(iter->head_page)) { | 
| Steven Rostedt | ea05b57 | 2009-06-03 09:30:10 -0400 | [diff] [blame] | 3455 | /* discarded commits can make the page empty */ | 
|  | 3456 | if (iter->head_page == cpu_buffer->commit_page) | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3457 | return; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3458 | rb_inc_iter(iter); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3459 | return; | 
|  | 3460 | } | 
|  | 3461 |  | 
|  | 3462 | event = rb_iter_head_event(iter); | 
|  | 3463 |  | 
|  | 3464 | length = rb_event_length(event); | 
|  | 3465 |  | 
|  | 3466 | /* | 
|  | 3467 | * This should not be called to advance the header if we are | 
|  | 3468 | * at the tail of the buffer. | 
|  | 3469 | */ | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3470 | if (RB_WARN_ON(cpu_buffer, | 
| Steven Rostedt | f536aaf | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 3471 | (iter->head_page == cpu_buffer->commit_page) && | 
| Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 3472 | (iter->head + length > rb_commit_index(cpu_buffer)))) | 
|  | 3473 | return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3474 |  | 
|  | 3475 | rb_update_iter_read_stamp(iter, event); | 
|  | 3476 |  | 
|  | 3477 | iter->head += length; | 
|  | 3478 |  | 
|  | 3479 | /* check for end of page padding */ | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3480 | if ((iter->head >= rb_page_size(iter->head_page)) && | 
|  | 3481 | (iter->head_page != cpu_buffer->commit_page)) | 
| Steven Rostedt | 771e038 | 2012-11-30 10:41:57 -0500 | [diff] [blame] | 3482 | rb_inc_iter(iter); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3483 | } | 
|  | 3484 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3485 | static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 3486 | { | 
|  | 3487 | return cpu_buffer->lost_events; | 
|  | 3488 | } | 
|  | 3489 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3490 | static struct ring_buffer_event * | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3491 | rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, | 
|  | 3492 | unsigned long *lost_events) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3493 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3494 | struct ring_buffer_event *event; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3495 | struct buffer_page *reader; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3496 | int nr_loops = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3497 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3498 | again: | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3499 | /* | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3500 | * We repeat when a time extend is encountered. | 
|  | 3501 | * Since the time extend is always attached to a data event, | 
|  | 3502 | * we should never loop more than once. | 
|  | 3503 | * (We never hit the following condition more than twice). | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3504 | */ | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3505 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3506 | return NULL; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3507 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3508 | reader = rb_get_reader_page(cpu_buffer); | 
|  | 3509 | if (!reader) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3510 | return NULL; | 
|  | 3511 |  | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3512 | event = rb_reader_event(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3513 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 3514 | switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3515 | case RINGBUF_TYPE_PADDING: | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3516 | if (rb_null_event(event)) | 
|  | 3517 | RB_WARN_ON(cpu_buffer, 1); | 
|  | 3518 | /* | 
|  | 3519 | * Because the writer could be discarding every | 
|  | 3520 | * event it creates (which would probably be bad) | 
|  | 3521 | * if we were to go back to "again" then we may never | 
|  | 3522 | * catch up, and will trigger the warn on, or lock | 
|  | 3523 | * the box. Return the padding, and we will release | 
|  | 3524 | * the current locks, and try again. | 
|  | 3525 | */ | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3526 | return event; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3527 |  | 
|  | 3528 | case RINGBUF_TYPE_TIME_EXTEND: | 
|  | 3529 | /* Internal data, OK to advance */ | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3530 | rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3531 | goto again; | 
|  | 3532 |  | 
|  | 3533 | case RINGBUF_TYPE_TIME_STAMP: | 
|  | 3534 | /* FIXME: not implemented */ | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3535 | rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3536 | goto again; | 
|  | 3537 |  | 
|  | 3538 | case RINGBUF_TYPE_DATA: | 
|  | 3539 | if (ts) { | 
|  | 3540 | *ts = cpu_buffer->read_stamp + event->time_delta; | 
| Robert Richter | d8eeb2d | 2009-07-31 14:58:04 +0200 | [diff] [blame] | 3541 | ring_buffer_normalize_time_stamp(cpu_buffer->buffer, | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 3542 | cpu_buffer->cpu, ts); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3543 | } | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3544 | if (lost_events) | 
|  | 3545 | *lost_events = rb_lost_events(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3546 | return event; | 
|  | 3547 |  | 
|  | 3548 | default: | 
|  | 3549 | BUG(); | 
|  | 3550 | } | 
|  | 3551 |  | 
|  | 3552 | return NULL; | 
|  | 3553 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3554 | EXPORT_SYMBOL_GPL(ring_buffer_peek); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3555 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3556 | static struct ring_buffer_event * | 
|  | 3557 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3558 | { | 
|  | 3559 | struct ring_buffer *buffer; | 
|  | 3560 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3561 | struct ring_buffer_event *event; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3562 | int nr_loops = 0; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3563 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3564 | cpu_buffer = iter->cpu_buffer; | 
|  | 3565 | buffer = cpu_buffer->buffer; | 
|  | 3566 |  | 
| Steven Rostedt | 492a74f | 2010-01-25 15:17:47 -0500 | [diff] [blame] | 3567 | /* | 
|  | 3568 | * Check if someone performed a consuming read to | 
|  | 3569 | * the buffer. A consuming read invalidates the iterator | 
|  | 3570 | * and we need to reset the iterator in this case. | 
|  | 3571 | */ | 
|  | 3572 | if (unlikely(iter->cache_read != cpu_buffer->read || | 
|  | 3573 | iter->cache_reader_page != cpu_buffer->reader_page)) | 
|  | 3574 | rb_iter_reset(iter); | 
|  | 3575 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3576 | again: | 
| Steven Rostedt | 3c05d74 | 2010-01-26 16:14:08 -0500 | [diff] [blame] | 3577 | if (ring_buffer_iter_empty(iter)) | 
|  | 3578 | return NULL; | 
|  | 3579 |  | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3580 | /* | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3581 | * We repeat when a time extend is encountered. | 
|  | 3582 | * Since the time extend is always attached to a data event, | 
|  | 3583 | * we should never loop more than once. | 
|  | 3584 | * (We never hit the following condition more than twice). | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3585 | */ | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 3586 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3587 | return NULL; | 
| Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 3588 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3589 | if (rb_per_cpu_empty(cpu_buffer)) | 
|  | 3590 | return NULL; | 
|  | 3591 |  | 
| Steven Rostedt | 3c05d74 | 2010-01-26 16:14:08 -0500 | [diff] [blame] | 3592 | if (iter->head >= local_read(&iter->head_page->page->commit)) { | 
|  | 3593 | rb_inc_iter(iter); | 
|  | 3594 | goto again; | 
|  | 3595 | } | 
|  | 3596 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3597 | event = rb_iter_head_event(iter); | 
|  | 3598 |  | 
| Lai Jiangshan | 334d416 | 2009-04-24 11:27:05 +0800 | [diff] [blame] | 3599 | switch (event->type_len) { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3600 | case RINGBUF_TYPE_PADDING: | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3601 | if (rb_null_event(event)) { | 
|  | 3602 | rb_inc_iter(iter); | 
|  | 3603 | goto again; | 
|  | 3604 | } | 
|  | 3605 | rb_advance_iter(iter); | 
|  | 3606 | return event; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3607 |  | 
|  | 3608 | case RINGBUF_TYPE_TIME_EXTEND: | 
|  | 3609 | /* Internal data, OK to advance */ | 
|  | 3610 | rb_advance_iter(iter); | 
|  | 3611 | goto again; | 
|  | 3612 |  | 
|  | 3613 | case RINGBUF_TYPE_TIME_STAMP: | 
|  | 3614 | /* FIXME: not implemented */ | 
|  | 3615 | rb_advance_iter(iter); | 
|  | 3616 | goto again; | 
|  | 3617 |  | 
|  | 3618 | case RINGBUF_TYPE_DATA: | 
|  | 3619 | if (ts) { | 
|  | 3620 | *ts = iter->read_stamp + event->time_delta; | 
| Steven Rostedt | 37886f6 | 2009-03-17 17:22:06 -0400 | [diff] [blame] | 3621 | ring_buffer_normalize_time_stamp(buffer, | 
|  | 3622 | cpu_buffer->cpu, ts); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3623 | } | 
|  | 3624 | return event; | 
|  | 3625 |  | 
|  | 3626 | default: | 
|  | 3627 | BUG(); | 
|  | 3628 | } | 
|  | 3629 |  | 
|  | 3630 | return NULL; | 
|  | 3631 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3632 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3633 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3634 | static inline int rb_ok_to_lock(void) | 
|  | 3635 | { | 
|  | 3636 | /* | 
|  | 3637 | * If an NMI die dumps out the content of the ring buffer | 
|  | 3638 | * do not grab locks. We also permanently disable the ring | 
|  | 3639 | * buffer too. A one time deal is all you get from reading | 
|  | 3640 | * the ring buffer from an NMI. | 
|  | 3641 | */ | 
| Steven Rostedt | 464e85e | 2009-08-05 15:26:37 -0400 | [diff] [blame] | 3642 | if (likely(!in_nmi())) | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3643 | return 1; | 
|  | 3644 |  | 
|  | 3645 | tracing_off_permanent(); | 
|  | 3646 | return 0; | 
|  | 3647 | } | 
|  | 3648 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3649 | /** | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3650 | * ring_buffer_peek - peek at the next event to be read | 
|  | 3651 | * @buffer: The ring buffer to read | 
|  | 3652 | * @cpu: The cpu to peak at | 
|  | 3653 | * @ts: The timestamp counter of this event. | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3654 | * @lost_events: a variable to store if events were lost (may be NULL) | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3655 | * | 
|  | 3656 | * This will return the event that will be read next, but does | 
|  | 3657 | * not consume the data. | 
|  | 3658 | */ | 
|  | 3659 | struct ring_buffer_event * | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3660 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, | 
|  | 3661 | unsigned long *lost_events) | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3662 | { | 
|  | 3663 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3664 | struct ring_buffer_event *event; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3665 | unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3666 | int dolock; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3667 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3668 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3669 | return NULL; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3670 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3671 | dolock = rb_ok_to_lock(); | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3672 | again: | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3673 | local_irq_save(flags); | 
|  | 3674 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3675 | raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3676 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 
| Robert Richter | 469535a | 2009-07-30 19:19:18 +0200 | [diff] [blame] | 3677 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
|  | 3678 | rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3679 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3680 | raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3681 | local_irq_restore(flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3682 |  | 
| Steven Rostedt | 1b959e1 | 2009-09-03 10:12:13 -0400 | [diff] [blame] | 3683 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3684 | goto again; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3685 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3686 | return event; | 
|  | 3687 | } | 
|  | 3688 |  | 
|  | 3689 | /** | 
|  | 3690 | * ring_buffer_iter_peek - peek at the next event to be read | 
|  | 3691 | * @iter: The ring buffer iterator | 
|  | 3692 | * @ts: The timestamp counter of this event. | 
|  | 3693 | * | 
|  | 3694 | * This will return the event that will be read next, but does | 
|  | 3695 | * not increment the iterator. | 
|  | 3696 | */ | 
|  | 3697 | struct ring_buffer_event * | 
|  | 3698 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | 
|  | 3699 | { | 
|  | 3700 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
|  | 3701 | struct ring_buffer_event *event; | 
|  | 3702 | unsigned long flags; | 
|  | 3703 |  | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3704 | again: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3705 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3706 | event = rb_iter_peek(iter, ts); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3707 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3708 |  | 
| Steven Rostedt | 1b959e1 | 2009-09-03 10:12:13 -0400 | [diff] [blame] | 3709 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3710 | goto again; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3711 |  | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3712 | return event; | 
|  | 3713 | } | 
|  | 3714 |  | 
|  | 3715 | /** | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3716 | * ring_buffer_consume - return an event and consume it | 
|  | 3717 | * @buffer: The ring buffer to get the next event from | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3718 | * @cpu: the cpu to read the buffer from | 
|  | 3719 | * @ts: a variable to store the timestamp (may be NULL) | 
|  | 3720 | * @lost_events: a variable to store if events were lost (may be NULL) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3721 | * | 
|  | 3722 | * Returns the next event in the ring buffer, and that event is consumed. | 
|  | 3723 | * Meaning, that sequential reads will keep returning a different event, | 
|  | 3724 | * and eventually empty the ring buffer if the producer is slower. | 
|  | 3725 | */ | 
|  | 3726 | struct ring_buffer_event * | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3727 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | 
|  | 3728 | unsigned long *lost_events) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3729 | { | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3730 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3731 | struct ring_buffer_event *event = NULL; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3732 | unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3733 | int dolock; | 
|  | 3734 |  | 
|  | 3735 | dolock = rb_ok_to_lock(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3736 |  | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3737 | again: | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3738 | /* might be called in atomic */ | 
|  | 3739 | preempt_disable(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3740 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3741 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 3742 | goto out; | 
|  | 3743 |  | 
|  | 3744 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3745 | local_irq_save(flags); | 
|  | 3746 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3747 | raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3748 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3749 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 
|  | 3750 | if (event) { | 
|  | 3751 | cpu_buffer->lost_events = 0; | 
| Robert Richter | 469535a | 2009-07-30 19:19:18 +0200 | [diff] [blame] | 3752 | rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3753 | } | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3754 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3755 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3756 | raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 3757 | local_irq_restore(flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3758 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 3759 | out: | 
|  | 3760 | preempt_enable(); | 
|  | 3761 |  | 
| Steven Rostedt | 1b959e1 | 2009-09-03 10:12:13 -0400 | [diff] [blame] | 3762 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3763 | goto again; | 
| Tom Zanussi | 2d62271 | 2009-03-22 03:30:49 -0500 | [diff] [blame] | 3764 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3765 | return event; | 
|  | 3766 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3767 | EXPORT_SYMBOL_GPL(ring_buffer_consume); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3768 |  | 
|  | 3769 | /** | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3770 | * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3771 | * @buffer: The ring buffer to read from | 
|  | 3772 | * @cpu: The cpu buffer to iterate over | 
|  | 3773 | * | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3774 | * This performs the initial preparations necessary to iterate | 
|  | 3775 | * through the buffer.  Memory is allocated, buffer recording | 
|  | 3776 | * is disabled, and the iterator pointer is returned to the caller. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3777 | * | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3778 | * Disabling buffer recordng prevents the reading from being | 
|  | 3779 | * corrupted. This is not a consuming read, so a producer is not | 
|  | 3780 | * expected. | 
|  | 3781 | * | 
|  | 3782 | * After a sequence of ring_buffer_read_prepare calls, the user is | 
|  | 3783 | * expected to make at least one call to ring_buffer_prepare_sync. | 
|  | 3784 | * Afterwards, ring_buffer_read_start is invoked to get things going | 
|  | 3785 | * for real. | 
|  | 3786 | * | 
|  | 3787 | * This overall must be paired with ring_buffer_finish. | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3788 | */ | 
|  | 3789 | struct ring_buffer_iter * | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3790 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3791 | { | 
|  | 3792 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3793 | struct ring_buffer_iter *iter; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3794 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3795 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3796 | return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3797 |  | 
|  | 3798 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | 
|  | 3799 | if (!iter) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3800 | return NULL; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3801 |  | 
|  | 3802 | cpu_buffer = buffer->buffers[cpu]; | 
|  | 3803 |  | 
|  | 3804 | iter->cpu_buffer = cpu_buffer; | 
|  | 3805 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 3806 | atomic_inc(&buffer->resize_disabled); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3807 | atomic_inc(&cpu_buffer->record_disabled); | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3808 |  | 
|  | 3809 | return iter; | 
|  | 3810 | } | 
|  | 3811 | EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); | 
|  | 3812 |  | 
|  | 3813 | /** | 
|  | 3814 | * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls | 
|  | 3815 | * | 
|  | 3816 | * All previously invoked ring_buffer_read_prepare calls to prepare | 
|  | 3817 | * iterators will be synchronized.  Afterwards, read_buffer_read_start | 
|  | 3818 | * calls on those iterators are allowed. | 
|  | 3819 | */ | 
|  | 3820 | void | 
|  | 3821 | ring_buffer_read_prepare_sync(void) | 
|  | 3822 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3823 | synchronize_sched(); | 
| David Miller | 72c9ddf | 2010-04-20 15:47:11 -0700 | [diff] [blame] | 3824 | } | 
|  | 3825 | EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); | 
|  | 3826 |  | 
|  | 3827 | /** | 
|  | 3828 | * ring_buffer_read_start - start a non consuming read of the buffer | 
|  | 3829 | * @iter: The iterator returned by ring_buffer_read_prepare | 
|  | 3830 | * | 
|  | 3831 | * This finalizes the startup of an iteration through the buffer. | 
|  | 3832 | * The iterator comes from a call to ring_buffer_read_prepare and | 
|  | 3833 | * an intervening ring_buffer_read_prepare_sync must have been | 
|  | 3834 | * performed. | 
|  | 3835 | * | 
|  | 3836 | * Must be paired with ring_buffer_finish. | 
|  | 3837 | */ | 
|  | 3838 | void | 
|  | 3839 | ring_buffer_read_start(struct ring_buffer_iter *iter) | 
|  | 3840 | { | 
|  | 3841 | struct ring_buffer_per_cpu *cpu_buffer; | 
|  | 3842 | unsigned long flags; | 
|  | 3843 |  | 
|  | 3844 | if (!iter) | 
|  | 3845 | return; | 
|  | 3846 |  | 
|  | 3847 | cpu_buffer = iter->cpu_buffer; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3848 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3849 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3850 | arch_spin_lock(&cpu_buffer->lock); | 
| Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 3851 | rb_iter_reset(iter); | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 3852 | arch_spin_unlock(&cpu_buffer->lock); | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3853 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3854 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3855 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3856 |  | 
|  | 3857 | /** | 
|  | 3858 | * ring_buffer_finish - finish reading the iterator of the buffer | 
|  | 3859 | * @iter: The iterator retrieved by ring_buffer_start | 
|  | 3860 | * | 
|  | 3861 | * This re-enables the recording to the buffer, and frees the | 
|  | 3862 | * iterator. | 
|  | 3863 | */ | 
|  | 3864 | void | 
|  | 3865 | ring_buffer_read_finish(struct ring_buffer_iter *iter) | 
|  | 3866 | { | 
|  | 3867 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
| Steven Rostedt | 9366c1b | 2012-11-29 22:31:16 -0500 | [diff] [blame] | 3868 | unsigned long flags; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3869 |  | 
| Steven Rostedt | 659f451 | 2012-05-14 17:02:33 -0400 | [diff] [blame] | 3870 | /* | 
|  | 3871 | * Ring buffer is disabled from recording, here's a good place | 
| Steven Rostedt | 9366c1b | 2012-11-29 22:31:16 -0500 | [diff] [blame] | 3872 | * to check the integrity of the ring buffer. | 
|  | 3873 | * Must prevent readers from trying to read, as the check | 
|  | 3874 | * clears the HEAD page and readers require it. | 
| Steven Rostedt | 659f451 | 2012-05-14 17:02:33 -0400 | [diff] [blame] | 3875 | */ | 
| Steven Rostedt | 9366c1b | 2012-11-29 22:31:16 -0500 | [diff] [blame] | 3876 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 659f451 | 2012-05-14 17:02:33 -0400 | [diff] [blame] | 3877 | rb_check_pages(cpu_buffer); | 
| Steven Rostedt | 9366c1b | 2012-11-29 22:31:16 -0500 | [diff] [blame] | 3878 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 659f451 | 2012-05-14 17:02:33 -0400 | [diff] [blame] | 3879 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3880 | atomic_dec(&cpu_buffer->record_disabled); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 3881 | atomic_dec(&cpu_buffer->buffer->resize_disabled); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3882 | kfree(iter); | 
|  | 3883 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3884 | EXPORT_SYMBOL_GPL(ring_buffer_read_finish); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3885 |  | 
|  | 3886 | /** | 
|  | 3887 | * ring_buffer_read - read the next item in the ring buffer by the iterator | 
|  | 3888 | * @iter: The ring buffer iterator | 
|  | 3889 | * @ts: The time stamp of the event read. | 
|  | 3890 | * | 
|  | 3891 | * This reads the next event in the ring buffer and increments the iterator. | 
|  | 3892 | */ | 
|  | 3893 | struct ring_buffer_event * | 
|  | 3894 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | 
|  | 3895 | { | 
|  | 3896 | struct ring_buffer_event *event; | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3897 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 
|  | 3898 | unsigned long flags; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3899 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3900 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7e9391c | 2009-09-03 10:02:09 -0400 | [diff] [blame] | 3901 | again: | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3902 | event = rb_iter_peek(iter, ts); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3903 | if (!event) | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3904 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3905 |  | 
| Steven Rostedt | 7e9391c | 2009-09-03 10:02:09 -0400 | [diff] [blame] | 3906 | if (event->type_len == RINGBUF_TYPE_PADDING) | 
|  | 3907 | goto again; | 
|  | 3908 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3909 | rb_advance_iter(iter); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3910 | out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3911 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3912 |  | 
|  | 3913 | return event; | 
|  | 3914 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3915 | EXPORT_SYMBOL_GPL(ring_buffer_read); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3916 |  | 
|  | 3917 | /** | 
|  | 3918 | * ring_buffer_size - return the size of the ring buffer (in bytes) | 
|  | 3919 | * @buffer: The ring buffer. | 
|  | 3920 | */ | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3921 | unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3922 | { | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 3923 | /* | 
|  | 3924 | * Earlier, this method returned | 
|  | 3925 | *	BUF_PAGE_SIZE * buffer->nr_pages | 
|  | 3926 | * Since the nr_pages field is now removed, we have converted this to | 
|  | 3927 | * return the per cpu buffer value. | 
|  | 3928 | */ | 
|  | 3929 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 3930 | return 0; | 
|  | 3931 |  | 
|  | 3932 | return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3933 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 3934 | EXPORT_SYMBOL_GPL(ring_buffer_size); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3935 |  | 
|  | 3936 | static void | 
|  | 3937 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | 
|  | 3938 | { | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3939 | rb_head_page_deactivate(cpu_buffer); | 
|  | 3940 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3941 | cpu_buffer->head_page | 
| Steven Rostedt | 3adc54f | 2009-03-30 15:32:01 -0400 | [diff] [blame] | 3942 | = list_entry(cpu_buffer->pages, struct buffer_page, list); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3943 | local_set(&cpu_buffer->head_page->write, 0); | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 3944 | local_set(&cpu_buffer->head_page->entries, 0); | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 3945 | local_set(&cpu_buffer->head_page->page->commit, 0); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3946 |  | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 3947 | cpu_buffer->head_page->read = 0; | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3948 |  | 
|  | 3949 | cpu_buffer->tail_page = cpu_buffer->head_page; | 
|  | 3950 | cpu_buffer->commit_page = cpu_buffer->head_page; | 
|  | 3951 |  | 
|  | 3952 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 
| Vaibhav Nagarnaik | 5040b4b | 2012-05-03 18:59:51 -0700 | [diff] [blame] | 3953 | INIT_LIST_HEAD(&cpu_buffer->new_pages); | 
| Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 3954 | local_set(&cpu_buffer->reader_page->write, 0); | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 3955 | local_set(&cpu_buffer->reader_page->entries, 0); | 
| Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 3956 | local_set(&cpu_buffer->reader_page->page->commit, 0); | 
| Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 3957 | cpu_buffer->reader_page->read = 0; | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 3958 |  | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 3959 | local_set(&cpu_buffer->entries_bytes, 0); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3960 | local_set(&cpu_buffer->overrun, 0); | 
| Slava Pestov | 884bfe8 | 2011-07-15 14:23:58 -0700 | [diff] [blame] | 3961 | local_set(&cpu_buffer->commit_overrun, 0); | 
|  | 3962 | local_set(&cpu_buffer->dropped_events, 0); | 
| Steven Rostedt | e4906ef | 2009-04-30 20:49:44 -0400 | [diff] [blame] | 3963 | local_set(&cpu_buffer->entries, 0); | 
| Steven Rostedt | fa74395 | 2009-06-16 12:37:57 -0400 | [diff] [blame] | 3964 | local_set(&cpu_buffer->committing, 0); | 
|  | 3965 | local_set(&cpu_buffer->commits, 0); | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3966 | cpu_buffer->read = 0; | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 3967 | cpu_buffer->read_bytes = 0; | 
| Steven Rostedt | 69507c0 | 2009-01-21 18:45:57 -0500 | [diff] [blame] | 3968 |  | 
|  | 3969 | cpu_buffer->write_stamp = 0; | 
|  | 3970 | cpu_buffer->read_stamp = 0; | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3971 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 3972 | cpu_buffer->lost_events = 0; | 
|  | 3973 | cpu_buffer->last_overrun = 0; | 
|  | 3974 |  | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 3975 | rb_head_page_activate(cpu_buffer); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3976 | } | 
|  | 3977 |  | 
|  | 3978 | /** | 
|  | 3979 | * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer | 
|  | 3980 | * @buffer: The ring buffer to reset a per cpu buffer of | 
|  | 3981 | * @cpu: The CPU buffer to be reset | 
|  | 3982 | */ | 
|  | 3983 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 3984 | { | 
|  | 3985 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 
|  | 3986 | unsigned long flags; | 
|  | 3987 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 3988 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 3989 | return; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 3990 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 3991 | atomic_inc(&buffer->resize_disabled); | 
| Steven Rostedt | 41ede23 | 2009-05-01 20:26:54 -0400 | [diff] [blame] | 3992 | atomic_inc(&cpu_buffer->record_disabled); | 
|  | 3993 |  | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 3994 | /* Make sure all commits have finished */ | 
|  | 3995 | synchronize_sched(); | 
|  | 3996 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 3997 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 3998 |  | 
| Steven Rostedt | 41b6a95 | 2009-09-02 09:59:48 -0400 | [diff] [blame] | 3999 | if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) | 
|  | 4000 | goto out; | 
|  | 4001 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 4002 | arch_spin_lock(&cpu_buffer->lock); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4003 |  | 
|  | 4004 | rb_reset_cpu(cpu_buffer); | 
|  | 4005 |  | 
| Thomas Gleixner | 0199c4e | 2009-12-02 20:01:25 +0100 | [diff] [blame] | 4006 | arch_spin_unlock(&cpu_buffer->lock); | 
| Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 4007 |  | 
| Steven Rostedt | 41b6a95 | 2009-09-02 09:59:48 -0400 | [diff] [blame] | 4008 | out: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4009 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 41ede23 | 2009-05-01 20:26:54 -0400 | [diff] [blame] | 4010 |  | 
|  | 4011 | atomic_dec(&cpu_buffer->record_disabled); | 
| Vaibhav Nagarnaik | 83f4031 | 2012-05-03 18:59:50 -0700 | [diff] [blame] | 4012 | atomic_dec(&buffer->resize_disabled); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4013 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 4014 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4015 |  | 
|  | 4016 | /** | 
|  | 4017 | * ring_buffer_reset - reset a ring buffer | 
|  | 4018 | * @buffer: The ring buffer to reset all cpu buffers | 
|  | 4019 | */ | 
|  | 4020 | void ring_buffer_reset(struct ring_buffer *buffer) | 
|  | 4021 | { | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4022 | int cpu; | 
|  | 4023 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4024 | for_each_buffer_cpu(buffer, cpu) | 
| Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 4025 | ring_buffer_reset_cpu(buffer, cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4026 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 4027 | EXPORT_SYMBOL_GPL(ring_buffer_reset); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4028 |  | 
|  | 4029 | /** | 
|  | 4030 | * rind_buffer_empty - is the ring buffer empty? | 
|  | 4031 | * @buffer: The ring buffer to test | 
|  | 4032 | */ | 
|  | 4033 | int ring_buffer_empty(struct ring_buffer *buffer) | 
|  | 4034 | { | 
|  | 4035 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 4036 | unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4037 | int dolock; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4038 | int cpu; | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 4039 | int ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4040 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4041 | dolock = rb_ok_to_lock(); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4042 |  | 
|  | 4043 | /* yes this is racy, but if you don't like the race, lock the buffer */ | 
|  | 4044 | for_each_buffer_cpu(buffer, cpu) { | 
|  | 4045 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4046 | local_irq_save(flags); | 
|  | 4047 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4048 | raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 4049 | ret = rb_per_cpu_empty(cpu_buffer); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4050 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4051 | raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4052 | local_irq_restore(flags); | 
|  | 4053 |  | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 4054 | if (!ret) | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4055 | return 0; | 
|  | 4056 | } | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4057 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4058 | return 1; | 
|  | 4059 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 4060 | EXPORT_SYMBOL_GPL(ring_buffer_empty); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4061 |  | 
|  | 4062 | /** | 
|  | 4063 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? | 
|  | 4064 | * @buffer: The ring buffer | 
|  | 4065 | * @cpu: The CPU buffer to test | 
|  | 4066 | */ | 
|  | 4067 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | 
|  | 4068 | { | 
|  | 4069 | struct ring_buffer_per_cpu *cpu_buffer; | 
| Steven Rostedt | d478820 | 2009-06-17 00:39:43 -0400 | [diff] [blame] | 4070 | unsigned long flags; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4071 | int dolock; | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 4072 | int ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4073 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4074 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 8aabee5 | 2009-03-12 13:13:49 -0400 | [diff] [blame] | 4075 | return 1; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4076 |  | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4077 | dolock = rb_ok_to_lock(); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4078 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4079 | cpu_buffer = buffer->buffers[cpu]; | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4080 | local_irq_save(flags); | 
|  | 4081 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4082 | raw_spin_lock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4083 | ret = rb_per_cpu_empty(cpu_buffer); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4084 | if (dolock) | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4085 | raw_spin_unlock(&cpu_buffer->reader_lock); | 
| Steven Rostedt | 8d707e8 | 2009-06-16 21:22:48 -0400 | [diff] [blame] | 4086 | local_irq_restore(flags); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4087 |  | 
|  | 4088 | return ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4089 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 4090 | EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4091 |  | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 4092 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4093 | /** | 
|  | 4094 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers | 
|  | 4095 | * @buffer_a: One buffer to swap with | 
|  | 4096 | * @buffer_b: The other buffer to swap with | 
|  | 4097 | * | 
|  | 4098 | * This function is useful for tracers that want to take a "snapshot" | 
|  | 4099 | * of a CPU buffer and has another back up buffer lying around. | 
|  | 4100 | * it is expected that the tracer handles the cpu buffer not being | 
|  | 4101 | * used at the moment. | 
|  | 4102 | */ | 
|  | 4103 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | 
|  | 4104 | struct ring_buffer *buffer_b, int cpu) | 
|  | 4105 | { | 
|  | 4106 | struct ring_buffer_per_cpu *cpu_buffer_a; | 
|  | 4107 | struct ring_buffer_per_cpu *cpu_buffer_b; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4108 | int ret = -EINVAL; | 
|  | 4109 |  | 
| Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 4110 | if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || | 
|  | 4111 | !cpumask_test_cpu(cpu, buffer_b->cpumask)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4112 | goto out; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4113 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4114 | cpu_buffer_a = buffer_a->buffers[cpu]; | 
|  | 4115 | cpu_buffer_b = buffer_b->buffers[cpu]; | 
|  | 4116 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4117 | /* At least make sure the two buffers are somewhat the same */ | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4118 | if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4119 | goto out; | 
|  | 4120 |  | 
|  | 4121 | ret = -EAGAIN; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4122 |  | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 4123 | if (ring_buffer_flags != RB_BUFFERS_ON) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4124 | goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 4125 |  | 
|  | 4126 | if (atomic_read(&buffer_a->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4127 | goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 4128 |  | 
|  | 4129 | if (atomic_read(&buffer_b->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4130 | goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 4131 |  | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 4132 | if (atomic_read(&cpu_buffer_a->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4133 | goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 4134 |  | 
|  | 4135 | if (atomic_read(&cpu_buffer_b->record_disabled)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4136 | goto out; | 
| Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 4137 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4138 | /* | 
|  | 4139 | * We can't do a synchronize_sched here because this | 
|  | 4140 | * function can be called in atomic context. | 
|  | 4141 | * Normally this will be called from the same CPU as cpu. | 
|  | 4142 | * If not it's up to the caller to protect this. | 
|  | 4143 | */ | 
|  | 4144 | atomic_inc(&cpu_buffer_a->record_disabled); | 
|  | 4145 | atomic_inc(&cpu_buffer_b->record_disabled); | 
|  | 4146 |  | 
| Steven Rostedt | 9827799 | 2009-09-02 10:56:15 -0400 | [diff] [blame] | 4147 | ret = -EBUSY; | 
|  | 4148 | if (local_read(&cpu_buffer_a->committing)) | 
|  | 4149 | goto out_dec; | 
|  | 4150 | if (local_read(&cpu_buffer_b->committing)) | 
|  | 4151 | goto out_dec; | 
|  | 4152 |  | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4153 | buffer_a->buffers[cpu] = cpu_buffer_b; | 
|  | 4154 | buffer_b->buffers[cpu] = cpu_buffer_a; | 
|  | 4155 |  | 
|  | 4156 | cpu_buffer_b->buffer = buffer_a; | 
|  | 4157 | cpu_buffer_a->buffer = buffer_b; | 
|  | 4158 |  | 
| Steven Rostedt | 9827799 | 2009-09-02 10:56:15 -0400 | [diff] [blame] | 4159 | ret = 0; | 
|  | 4160 |  | 
|  | 4161 | out_dec: | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4162 | atomic_dec(&cpu_buffer_a->record_disabled); | 
|  | 4163 | atomic_dec(&cpu_buffer_b->record_disabled); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4164 | out: | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4165 | return ret; | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4166 | } | 
| Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 4167 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | 
| Steven Rostedt | 85bac32 | 2009-09-04 14:24:40 -0400 | [diff] [blame] | 4168 | #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ | 
| Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 4169 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4170 | /** | 
|  | 4171 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | 
|  | 4172 | * @buffer: the buffer to allocate for. | 
|  | 4173 | * | 
|  | 4174 | * This function is used in conjunction with ring_buffer_read_page. | 
|  | 4175 | * When reading a full page from the ring buffer, these functions | 
|  | 4176 | * can be used to speed up the process. The calling function should | 
|  | 4177 | * allocate a few pages first with this function. Then when it | 
|  | 4178 | * needs to get pages from the ring buffer, it passes the result | 
|  | 4179 | * of this function into ring_buffer_read_page, which will swap | 
|  | 4180 | * the page that was allocated, with the read page of the buffer. | 
|  | 4181 | * | 
|  | 4182 | * Returns: | 
|  | 4183 | *  The page allocated, or NULL on error. | 
|  | 4184 | */ | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4185 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4186 | { | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4187 | struct buffer_data_page *bpage; | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4188 | struct page *page; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4189 |  | 
| Vaibhav Nagarnaik | d7ec4bf | 2011-06-07 17:01:42 -0700 | [diff] [blame] | 4190 | page = alloc_pages_node(cpu_to_node(cpu), | 
|  | 4191 | GFP_KERNEL | __GFP_NORETRY, 0); | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4192 | if (!page) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4193 | return NULL; | 
|  | 4194 |  | 
| Vaibhav Nagarnaik | 7ea5906 | 2011-05-03 17:56:42 -0700 | [diff] [blame] | 4195 | bpage = page_address(page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4196 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4197 | rb_init_page(bpage); | 
|  | 4198 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4199 | return bpage; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4200 | } | 
| Steven Rostedt | d6ce96d | 2009-05-05 01:15:24 -0400 | [diff] [blame] | 4201 | EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4202 |  | 
|  | 4203 | /** | 
|  | 4204 | * ring_buffer_free_read_page - free an allocated read page | 
|  | 4205 | * @buffer: the buffer the page was allocate for | 
|  | 4206 | * @data: the page to free | 
|  | 4207 | * | 
|  | 4208 | * Free a page allocated from ring_buffer_alloc_read_page. | 
|  | 4209 | */ | 
|  | 4210 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | 
|  | 4211 | { | 
|  | 4212 | free_page((unsigned long)data); | 
|  | 4213 | } | 
| Steven Rostedt | d6ce96d | 2009-05-05 01:15:24 -0400 | [diff] [blame] | 4214 | EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4215 |  | 
|  | 4216 | /** | 
|  | 4217 | * ring_buffer_read_page - extract a page from the ring buffer | 
|  | 4218 | * @buffer: buffer to extract from | 
|  | 4219 | * @data_page: the page to use allocated from ring_buffer_alloc_read_page | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4220 | * @len: amount to extract | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4221 | * @cpu: the cpu of the buffer to extract | 
|  | 4222 | * @full: should the extraction only happen when the page is full. | 
|  | 4223 | * | 
|  | 4224 | * This function will pull out a page from the ring buffer and consume it. | 
|  | 4225 | * @data_page must be the address of the variable that was returned | 
|  | 4226 | * from ring_buffer_alloc_read_page. This is because the page might be used | 
|  | 4227 | * to swap with a page in the ring buffer. | 
|  | 4228 | * | 
|  | 4229 | * for example: | 
| Lai Jiangshan | b85fa01 | 2009-02-09 14:21:14 +0800 | [diff] [blame] | 4230 | *	rpage = ring_buffer_alloc_read_page(buffer); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4231 | *	if (!rpage) | 
|  | 4232 | *		return error; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4233 | *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4234 | *	if (ret >= 0) | 
|  | 4235 | *		process_page(rpage, ret); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4236 | * | 
|  | 4237 | * When @full is set, the function will not return true unless | 
|  | 4238 | * the writer is off the reader page. | 
|  | 4239 | * | 
|  | 4240 | * Note: it is up to the calling functions to handle sleeps and wakeups. | 
|  | 4241 | *  The ring buffer can be used anywhere in the kernel and can not | 
|  | 4242 | *  blindly call wake_up. The layer that uses the ring buffer must be | 
|  | 4243 | *  responsible for that. | 
|  | 4244 | * | 
|  | 4245 | * Returns: | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4246 | *  >=0 if data has been transferred, returns the offset of consumed data. | 
|  | 4247 | *  <0 if no data has been transferred. | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4248 | */ | 
|  | 4249 | int ring_buffer_read_page(struct ring_buffer *buffer, | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4250 | void **data_page, size_t len, int cpu, int full) | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4251 | { | 
|  | 4252 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 
|  | 4253 | struct ring_buffer_event *event; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4254 | struct buffer_data_page *bpage; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4255 | struct buffer_page *reader; | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4256 | unsigned long missed_events; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4257 | unsigned long flags; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4258 | unsigned int commit; | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4259 | unsigned int read; | 
| Steven Rostedt | 4f3640f | 2009-03-03 23:52:42 -0500 | [diff] [blame] | 4260 | u64 save_timestamp; | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4261 | int ret = -1; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4262 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4263 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 
|  | 4264 | goto out; | 
|  | 4265 |  | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4266 | /* | 
|  | 4267 | * If len is not big enough to hold the page header, then | 
|  | 4268 | * we can not copy anything. | 
|  | 4269 | */ | 
|  | 4270 | if (len <= BUF_PAGE_HDR_SIZE) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4271 | goto out; | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4272 |  | 
|  | 4273 | len -= BUF_PAGE_HDR_SIZE; | 
|  | 4274 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4275 | if (!data_page) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4276 | goto out; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4277 |  | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4278 | bpage = *data_page; | 
|  | 4279 | if (!bpage) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4280 | goto out; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4281 |  | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4282 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4283 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4284 | reader = rb_get_reader_page(cpu_buffer); | 
|  | 4285 | if (!reader) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4286 | goto out_unlock; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4287 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4288 | event = rb_reader_event(cpu_buffer); | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4289 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4290 | read = reader->read; | 
|  | 4291 | commit = rb_page_commit(reader); | 
|  | 4292 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4293 | /* Check if any events were dropped */ | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4294 | missed_events = cpu_buffer->lost_events; | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4295 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4296 | /* | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4297 | * If this page has been partially read or | 
|  | 4298 | * if len is not big enough to read the rest of the page or | 
|  | 4299 | * a writer is still on the page, then | 
|  | 4300 | * we must copy the data from the page to the buffer. | 
|  | 4301 | * Otherwise, we can simply swap the page with the one passed in. | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4302 | */ | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4303 | if (read || (len < (commit - read)) || | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4304 | cpu_buffer->reader_page == cpu_buffer->commit_page) { | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4305 | struct buffer_data_page *rpage = cpu_buffer->reader_page->page; | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4306 | unsigned int rpos = read; | 
|  | 4307 | unsigned int pos = 0; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4308 | unsigned int size; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4309 |  | 
|  | 4310 | if (full) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4311 | goto out_unlock; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4312 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4313 | if (len > (commit - read)) | 
|  | 4314 | len = (commit - read); | 
|  | 4315 |  | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 4316 | /* Always keep the time extend and data together */ | 
|  | 4317 | size = rb_event_ts_length(event); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4318 |  | 
|  | 4319 | if (len < size) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4320 | goto out_unlock; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4321 |  | 
| Steven Rostedt | 4f3640f | 2009-03-03 23:52:42 -0500 | [diff] [blame] | 4322 | /* save the current timestamp, since the user will need it */ | 
|  | 4323 | save_timestamp = cpu_buffer->read_stamp; | 
|  | 4324 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4325 | /* Need to copy one event at a time */ | 
|  | 4326 | do { | 
| David Sharp | e1e3592 | 2010-12-22 16:38:24 -0800 | [diff] [blame] | 4327 | /* We need the size of one event, because | 
|  | 4328 | * rb_advance_reader only advances by one event, | 
|  | 4329 | * whereas rb_event_ts_length may include the size of | 
|  | 4330 | * one or two events. | 
|  | 4331 | * We have already ensured there's enough space if this | 
|  | 4332 | * is a time extend. */ | 
|  | 4333 | size = rb_event_length(event); | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4334 | memcpy(bpage->data + pos, rpage->data + rpos, size); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4335 |  | 
|  | 4336 | len -= size; | 
|  | 4337 |  | 
|  | 4338 | rb_advance_reader(cpu_buffer); | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4339 | rpos = reader->read; | 
|  | 4340 | pos += size; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4341 |  | 
| Huang Ying | 18fab91 | 2010-07-28 14:14:01 +0800 | [diff] [blame] | 4342 | if (rpos >= commit) | 
|  | 4343 | break; | 
|  | 4344 |  | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4345 | event = rb_reader_event(cpu_buffer); | 
| Steven Rostedt | 69d1b83 | 2010-10-07 18:18:05 -0400 | [diff] [blame] | 4346 | /* Always keep the time extend and data together */ | 
|  | 4347 | size = rb_event_ts_length(event); | 
| David Sharp | e1e3592 | 2010-12-22 16:38:24 -0800 | [diff] [blame] | 4348 | } while (len >= size); | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4349 |  | 
|  | 4350 | /* update bpage */ | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4351 | local_set(&bpage->commit, pos); | 
| Steven Rostedt | 4f3640f | 2009-03-03 23:52:42 -0500 | [diff] [blame] | 4352 | bpage->time_stamp = save_timestamp; | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4353 |  | 
| Steven Rostedt | 474d32b | 2009-03-03 19:51:40 -0500 | [diff] [blame] | 4354 | /* we copied everything to the beginning */ | 
|  | 4355 | read = 0; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4356 | } else { | 
| Steven Rostedt | afbab76 | 2009-05-01 19:40:05 -0400 | [diff] [blame] | 4357 | /* update the entry counter */ | 
| Steven Rostedt | 77ae365 | 2009-03-27 11:00:29 -0400 | [diff] [blame] | 4358 | cpu_buffer->read += rb_page_entries(reader); | 
| Vaibhav Nagarnaik | c64e148 | 2011-08-16 14:46:16 -0700 | [diff] [blame] | 4359 | cpu_buffer->read_bytes += BUF_PAGE_SIZE; | 
| Steven Rostedt | afbab76 | 2009-05-01 19:40:05 -0400 | [diff] [blame] | 4360 |  | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4361 | /* swap the pages */ | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4362 | rb_init_page(bpage); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4363 | bpage = reader->page; | 
|  | 4364 | reader->page = *data_page; | 
|  | 4365 | local_set(&reader->write, 0); | 
| Steven Rostedt | 778c55d | 2009-05-01 18:44:45 -0400 | [diff] [blame] | 4366 | local_set(&reader->entries, 0); | 
| Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 4367 | reader->read = 0; | 
| Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 4368 | *data_page = bpage; | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4369 |  | 
|  | 4370 | /* | 
|  | 4371 | * Use the real_end for the data size, | 
|  | 4372 | * This gives us a chance to store the lost events | 
|  | 4373 | * on the page. | 
|  | 4374 | */ | 
|  | 4375 | if (reader->real_end) | 
|  | 4376 | local_set(&bpage->commit, reader->real_end); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4377 | } | 
| Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 4378 | ret = read; | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4379 |  | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4380 | cpu_buffer->lost_events = 0; | 
| Steven Rostedt | 2711ca2 | 2010-05-21 13:32:26 -0400 | [diff] [blame] | 4381 |  | 
|  | 4382 | commit = local_read(&bpage->commit); | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4383 | /* | 
|  | 4384 | * Set a flag in the commit field if we lost events | 
|  | 4385 | */ | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4386 | if (missed_events) { | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4387 | /* If there is room at the end of the page to save the | 
|  | 4388 | * missed events, then record it there. | 
|  | 4389 | */ | 
|  | 4390 | if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { | 
|  | 4391 | memcpy(&bpage->data[commit], &missed_events, | 
|  | 4392 | sizeof(missed_events)); | 
|  | 4393 | local_add(RB_MISSED_STORED, &bpage->commit); | 
| Steven Rostedt | 2711ca2 | 2010-05-21 13:32:26 -0400 | [diff] [blame] | 4394 | commit += sizeof(missed_events); | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4395 | } | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4396 | local_add(RB_MISSED_EVENTS, &bpage->commit); | 
| Steven Rostedt | ff0ff84 | 2010-03-31 22:11:42 -0400 | [diff] [blame] | 4397 | } | 
| Steven Rostedt | 66a8cb9 | 2010-03-31 13:21:56 -0400 | [diff] [blame] | 4398 |  | 
| Steven Rostedt | 2711ca2 | 2010-05-21 13:32:26 -0400 | [diff] [blame] | 4399 | /* | 
|  | 4400 | * This page may be off to user land. Zero it out here. | 
|  | 4401 | */ | 
|  | 4402 | if (commit < BUF_PAGE_SIZE) | 
|  | 4403 | memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); | 
|  | 4404 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4405 | out_unlock: | 
| Thomas Gleixner | 5389f6f | 2009-07-25 17:13:33 +0200 | [diff] [blame] | 4406 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4407 |  | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4408 | out: | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4409 | return ret; | 
|  | 4410 | } | 
| Steven Rostedt | d6ce96d | 2009-05-05 01:15:24 -0400 | [diff] [blame] | 4411 | EXPORT_SYMBOL_GPL(ring_buffer_read_page); | 
| Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 4412 |  | 
| Steven Rostedt | 59222ef | 2009-03-12 11:46:03 -0400 | [diff] [blame] | 4413 | #ifdef CONFIG_HOTPLUG_CPU | 
| Frederic Weisbecker | 09c9e84 | 2009-03-21 04:33:36 +0100 | [diff] [blame] | 4414 | static int rb_cpu_notify(struct notifier_block *self, | 
|  | 4415 | unsigned long action, void *hcpu) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4416 | { | 
|  | 4417 | struct ring_buffer *buffer = | 
|  | 4418 | container_of(self, struct ring_buffer, cpu_notify); | 
|  | 4419 | long cpu = (long)hcpu; | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4420 | int cpu_i, nr_pages_same; | 
|  | 4421 | unsigned int nr_pages; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4422 |  | 
|  | 4423 | switch (action) { | 
|  | 4424 | case CPU_UP_PREPARE: | 
|  | 4425 | case CPU_UP_PREPARE_FROZEN: | 
| Rusty Russell | 3f237a7 | 2009-06-12 21:15:30 +0930 | [diff] [blame] | 4426 | if (cpumask_test_cpu(cpu, buffer->cpumask)) | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4427 | return NOTIFY_OK; | 
|  | 4428 |  | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4429 | nr_pages = 0; | 
|  | 4430 | nr_pages_same = 1; | 
|  | 4431 | /* check if all cpu sizes are same */ | 
|  | 4432 | for_each_buffer_cpu(buffer, cpu_i) { | 
|  | 4433 | /* fill in the size from first enabled cpu */ | 
|  | 4434 | if (nr_pages == 0) | 
|  | 4435 | nr_pages = buffer->buffers[cpu_i]->nr_pages; | 
|  | 4436 | if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { | 
|  | 4437 | nr_pages_same = 0; | 
|  | 4438 | break; | 
|  | 4439 | } | 
|  | 4440 | } | 
|  | 4441 | /* allocate minimum pages, user can later expand it */ | 
|  | 4442 | if (!nr_pages_same) | 
|  | 4443 | nr_pages = 2; | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4444 | buffer->buffers[cpu] = | 
| Vaibhav Nagarnaik | 438ced1 | 2012-02-02 12:00:41 -0800 | [diff] [blame] | 4445 | rb_allocate_cpu_buffer(buffer, nr_pages, cpu); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4446 | if (!buffer->buffers[cpu]) { | 
|  | 4447 | WARN(1, "failed to allocate ring buffer on CPU %ld\n", | 
|  | 4448 | cpu); | 
|  | 4449 | return NOTIFY_OK; | 
|  | 4450 | } | 
|  | 4451 | smp_wmb(); | 
| Rusty Russell | 3f237a7 | 2009-06-12 21:15:30 +0930 | [diff] [blame] | 4452 | cpumask_set_cpu(cpu, buffer->cpumask); | 
| Steven Rostedt | 554f786 | 2009-03-11 22:00:13 -0400 | [diff] [blame] | 4453 | break; | 
|  | 4454 | case CPU_DOWN_PREPARE: | 
|  | 4455 | case CPU_DOWN_PREPARE_FROZEN: | 
|  | 4456 | /* | 
|  | 4457 | * Do nothing. | 
|  | 4458 | *  If we were to free the buffer, then the user would | 
|  | 4459 | *  lose any trace that was in the buffer. | 
|  | 4460 | */ | 
|  | 4461 | break; | 
|  | 4462 | default: | 
|  | 4463 | break; | 
|  | 4464 | } | 
|  | 4465 | return NOTIFY_OK; | 
|  | 4466 | } | 
|  | 4467 | #endif |