/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer itself is enabled.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *   ON   DISABLED
 *  ----  --------
 *    0      0     : ring buffers are off
 *    1      0     : ring buffers are on
 *    X      1     : ring buffers are permanently disabled
 */

enum {
        RB_BUFFERS_ON_BIT       = 0,
        RB_BUFFERS_DISABLED_BIT = 1,
};

enum {
        RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
        RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
};
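
/*
 * Illustrative sketch (not part of this file's API): a successful write
 * effectively has to pass all three layers described above, roughly:
 *
 *	if (likely(ring_buffer_flags == RB_BUFFERS_ON) &&
 *	    !atomic_read(&buffer->record_disabled) &&
 *	    !atomic_read(&cpu_buffer->record_disabled))
 *		... reserve and commit the event ...
 *
 * The real checks live in the reserve/commit paths further down.
 */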

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - return true if the ring buffers are currently enabled
 */
int tracing_is_on(void)
{
        return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

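/*
 * Example usage (illustrative only, not taken from this file): a debug
 * hook might stop all recording once a condition of interest hits, so
 * the buffers keep the trace that led up to it:
 *
 *	if (something_went_wrong() && tracing_is_on())
 *		tracing_off();
 *
 * something_went_wrong() is a hypothetical predicate supplied by the
 * caller.
 */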

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
        u64 time;

        preempt_disable_notrace();
        /* shift to debug/test normalization and TIME_EXTENTS */
        time = sched_clock() << DEBUG_SHIFT;
        preempt_enable_no_resched_notrace();

        return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
        /* Just stupid testing the normalize function and deltas */
        *ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	28

enum {
        RB_LEN_TIME_EXTEND = 8,
        RB_LEN_TIME_STAMP = 16,
};

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
        unsigned length;

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                /* undefined */
                return -1;

        case RINGBUF_TYPE_TIME_EXTEND:
                return RB_LEN_TIME_EXTEND;

        case RINGBUF_TYPE_TIME_STAMP:
                return RB_LEN_TIME_STAMP;

        case RINGBUF_TYPE_DATA:
                if (event->len)
                        length = event->len * RB_ALIGNMENT;
                else
                        length = event->array[0];
                return length + RB_EVNT_HDR_SIZE;
        default:
                BUG();
        }
        /* not hit */
        return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
        unsigned length = rb_event_length(event);
        if (event->type != RINGBUF_TYPE_DATA)
                return length;
        length -= RB_EVNT_HDR_SIZE;
        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
                length -= sizeof(event->array[0]);
        return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
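
/*
 * Worked example (illustrative): a data event with a 10 byte payload is
 * padded by rb_calculate_event_length() below to 12 bytes plus the
 * event header (assumed to be 4 bytes here), so event->len becomes
 * 12 / RB_ALIGNMENT = 3, rb_event_length() returns 16 and
 * ring_buffer_event_length() returns 12.  Payloads larger than
 * RB_MAX_SMALL_DATA instead set event->len = 0 and keep the length in
 * array[0].
 */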

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
        BUG_ON(event->type != RINGBUF_TYPE_DATA);
        /* If length is in len field, then array[0] has the data */
        if (event->len)
                return (void *)&event->array[0];
        /* Otherwise length is in array[0] and array[1] has the data */
        return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
        return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
        for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
        unsigned char    data[];        /* data of buffer page */
};

struct buffer_page {
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
        struct list_head list;          /* list of free pages */
        struct buffer_data_page *page;  /* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
        local_set(&bpage->commit, 0);
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
        free_page((unsigned long)bpage->page);
        kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
        if (delta & TS_DELTA_TEST)
                return 1;
        return 0;
}
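
/*
 * Worked example (illustrative, assuming a nanosecond clock): with
 * TS_SHIFT = 27 a delta of up to 2^27 - 1 ns (roughly 134 ms) fits in
 * the event header; anything larger makes test_time_stamp() return 1,
 * which is what triggers emission of a separate time-extend event in
 * the write path further down.
 */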

#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
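
/*
 * For example (illustrative): on a 64-bit build with 4K pages,
 * offsetof(struct buffer_data_page, data) is 16 (8 byte time_stamp plus
 * 8 byte local_t commit), so BUF_PAGE_SIZE is 4080 bytes of event data
 * per page.  The exact value depends on the architecture.
 */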

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
        int                             cpu;
        struct ring_buffer              *buffer;
        spinlock_t                      reader_lock; /* serialize readers */
        raw_spinlock_t                  lock;
        struct lock_class_key           lock_key;
        struct list_head                pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
        unsigned long                   overrun;
        unsigned long                   entries;
        u64                             write_stamp;
        u64                             read_stamp;
        atomic_t                        record_disabled;
};

struct ring_buffer {
        unsigned                        pages;
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
        cpumask_var_t                   cpumask;

        struct mutex                    mutex;

        struct ring_buffer_per_cpu      **buffers;
};

struct ring_buffer_iter {
        struct ring_buffer_per_cpu      *cpu_buffer;
        unsigned long                   head;
        struct buffer_page              *head_page;
        u64                             read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
        ({							\
                int _____ret = unlikely(cond);			\
                if (_____ret) {					\
                        atomic_inc(&buffer->record_disabled);	\
                        WARN_ON(1);				\
                }						\
                _____ret;					\
        })

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
                return -1;
        if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
                return -1;

        list_for_each_entry_safe(bpage, tmp, head, list) {
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.next->prev != &bpage->list))
                        return -1;
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.prev->next != &bpage->list))
                        return -1;
        }

        return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                             unsigned nr_pages)
{
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;
        unsigned long addr;
        LIST_HEAD(pages);
        unsigned i;

        for (i = 0; i < nr_pages; i++) {
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
                if (!bpage)
                        goto free_pages;
                list_add(&bpage->list, &pages);

                addr = __get_free_page(GFP_KERNEL);
                if (!addr)
                        goto free_pages;
                bpage->page = (void *)addr;
                rb_init_page(bpage->page);
        }

        list_splice(&pages, head);

        rb_check_pages(cpu_buffer);

        return 0;

free_pages:
        list_for_each_entry_safe(bpage, tmp, &pages, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
        unsigned long addr;
        int ret;

        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
                                  GFP_KERNEL, cpu_to_node(cpu));
        if (!cpu_buffer)
                return NULL;

        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
        cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&cpu_buffer->pages);

        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                             GFP_KERNEL, cpu_to_node(cpu));
        if (!bpage)
                goto fail_free_buffer;

        cpu_buffer->reader_page = bpage;
        addr = __get_free_page(GFP_KERNEL);
        if (!addr)
                goto fail_free_reader;
        bpage->page = (void *)addr;
        rb_init_page(bpage->page);

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

        ret = rb_allocate_pages(cpu_buffer, buffer->pages);
        if (ret < 0)
                goto fail_free_reader;

        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

        return cpu_buffer;

fail_free_reader:
        free_buffer_page(cpu_buffer->reader_page);

fail_free_buffer:
        kfree(cpu_buffer);
        return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = &cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        list_del_init(&cpu_buffer->reader_page->list);
        free_buffer_page(cpu_buffer->reader_page);

        list_for_each_entry_safe(bpage, tmp, head, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
        struct ring_buffer *buffer;
        int bsize;
        int cpu;

        /* Paranoid! Optimizes out when all is well */
        if (sizeof(struct buffer_page) > sizeof(struct page))
                ring_buffer_page_too_big();

        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
                         GFP_KERNEL);
        if (!buffer)
                return NULL;

        if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
                goto fail_free_buffer;

        buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        buffer->flags = flags;

        /* need at least two pages */
        if (buffer->pages == 1)
                buffer->pages++;

        cpumask_copy(buffer->cpumask, cpu_possible_mask);
        buffer->cpus = nr_cpu_ids;

        bsize = sizeof(void *) * nr_cpu_ids;
        buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
                                  GFP_KERNEL);
        if (!buffer->buffers)
                goto fail_free_cpumask;

        for_each_buffer_cpu(buffer, cpu) {
                buffer->buffers[cpu] =
                        rb_allocate_cpu_buffer(buffer, cpu);
                if (!buffer->buffers[cpu])
                        goto fail_free_buffers;
        }

        mutex_init(&buffer->mutex);

        return buffer;

fail_free_buffers:
        for_each_buffer_cpu(buffer, cpu) {
                if (buffer->buffers[cpu])
                        rb_free_cpu_buffer(buffer->buffers[cpu]);
        }
        kfree(buffer->buffers);

fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);

fail_free_buffer:
        kfree(buffer);
        return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);
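
/*
 * Example usage (illustrative only, not taken from this file): a client
 * that wants one megabyte of trace data per CPU, overwriting the oldest
 * events when a buffer fills up, could do:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */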

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
        int cpu;

        for_each_buffer_cpu(buffer, cpu)
                rb_free_cpu_buffer(buffer->buffers[cpu]);

        free_cpumask_var(buffer->cpumask);

        kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
        struct buffer_page *bpage;
        struct list_head *p;
        unsigned i;

        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();

        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
                        return;
                p = cpu_buffer->pages.next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
                return;

        rb_reset_cpu(cpu_buffer);

        rb_check_pages(cpu_buffer);

        atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
                struct list_head *pages, unsigned nr_pages)
{
        struct buffer_page *bpage;
        struct list_head *p;
        unsigned i;

        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();

        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
                        return;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                list_add_tail(&bpage->list, &cpu_buffer->pages);
        }
        rb_reset_cpu(cpu_buffer);

        rb_check_pages(cpu_buffer);

        atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 * RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned nr_pages, rm_pages, new_pages;
        struct buffer_page *bpage, *tmp;
        unsigned long buffer_size;
        unsigned long addr;
        LIST_HEAD(pages);
        int i, cpu;

        /*
         * Always succeed at resizing a non-existent buffer:
         */
        if (!buffer)
                return size;

        size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        size *= BUF_PAGE_SIZE;
        buffer_size = buffer->pages * BUF_PAGE_SIZE;

        /* we need a minimum of two pages */
        if (size < BUF_PAGE_SIZE * 2)
                size = BUF_PAGE_SIZE * 2;

        if (size == buffer_size)
                return size;

        mutex_lock(&buffer->mutex);

        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

        if (size < buffer_size) {

                /* easy case, just free pages */
                if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
                        mutex_unlock(&buffer->mutex);
                        return -1;
                }

                rm_pages = buffer->pages - nr_pages;

                for_each_buffer_cpu(buffer, cpu) {
                        cpu_buffer = buffer->buffers[cpu];
                        rb_remove_pages(cpu_buffer, rm_pages);
                }
                goto out;
        }

        /*
         * This is a bit more difficult. We only want to add pages
         * when we can allocate enough for all CPUs. We do this
         * by allocating all the pages and storing them on a local
         * link list. If we succeed in our allocation, then we
         * add these pages to the cpu_buffers. Otherwise we just free
         * them all and return -ENOMEM;
         */
        if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
                mutex_unlock(&buffer->mutex);
                return -1;
        }

        new_pages = nr_pages - buffer->pages;

        for_each_buffer_cpu(buffer, cpu) {
                for (i = 0; i < new_pages; i++) {
                        bpage = kzalloc_node(ALIGN(sizeof(*bpage),
                                                   cache_line_size()),
                                             GFP_KERNEL, cpu_to_node(cpu));
                        if (!bpage)
                                goto free_pages;
                        list_add(&bpage->list, &pages);
                        addr = __get_free_page(GFP_KERNEL);
                        if (!addr)
                                goto free_pages;
                        bpage->page = (void *)addr;
                        rb_init_page(bpage->page);
                }
        }

        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                rb_insert_pages(cpu_buffer, &pages, new_pages);
        }

        if (RB_WARN_ON(buffer, !list_empty(&pages))) {
                mutex_unlock(&buffer->mutex);
                return -1;
        }

out:
        buffer->pages = nr_pages;
        mutex_unlock(&buffer->mutex);

        return size;

free_pages:
        list_for_each_entry_safe(bpage, tmp, &pages, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        mutex_unlock(&buffer->mutex);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
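
/*
 * Example (illustrative): growing the buffer allocated in the example
 * above to two megabytes per CPU; the caller must make sure no tracer
 * is using the buffer while this runs:
 *
 *	if (ring_buffer_resize(rb, 2 << 20) < 0)
 *		printk(KERN_WARNING "ring buffer resize failed\n");
 */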

static inline int rb_null_event(struct ring_buffer_event *event)
{
        return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
        return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
        return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
        return __rb_page_index(cpu_buffer->reader_page,
                               cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
        return __rb_page_index(cpu_buffer->head_page,
                               cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
        return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
        return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
        return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
        return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
        return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
        return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct ring_buffer_event *event;
        unsigned long head;

        for (head = 0; head < rb_head_size(cpu_buffer);
             head += rb_event_length(event)) {

                event = __rb_page_index(cpu_buffer->head_page, head);
                if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
                        return;
                /* Only count data entries */
                if (event->type != RINGBUF_TYPE_DATA)
                        continue;
                cpu_buffer->overrun++;
                cpu_buffer->entries--;
        }
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page **bpage)
{
        struct list_head *p = (*bpage)->list.next;

        if (p == &cpu_buffer->pages)
                p = p->next;

        *bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
        unsigned long addr = (unsigned long)event;

        return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}
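
/*
 * Illustrative note: (addr & ~PAGE_MASK) above is the event's byte
 * offset within its page, and PAGE_SIZE - BUF_PAGE_SIZE is the size of
 * the buffer_data_page header, so the result is the event's index into
 * the data[] area -- the same index space used by write and commit.
 */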

static int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
             struct ring_buffer_event *event)
{
        unsigned long addr = (unsigned long)event;
        unsigned long index;

        index = rb_event_index(event);
        addr &= PAGE_MASK;

        return cpu_buffer->commit_page->page == (void *)addr &&
                rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
                    struct ring_buffer_event *event)
{
        unsigned long addr = (unsigned long)event;
        unsigned long index;

        index = rb_event_index(event);
        addr &= PAGE_MASK;

        while (cpu_buffer->commit_page->page != (void *)addr) {
                if (RB_WARN_ON(cpu_buffer,
                               cpu_buffer->commit_page == cpu_buffer->tail_page))
                        return;
                cpu_buffer->commit_page->page->commit =
                        cpu_buffer->commit_page->write;
                rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
                cpu_buffer->write_stamp =
                        cpu_buffer->commit_page->page->time_stamp;
        }

        /* Now set the commit to the event's index */
        local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
        /*
         * We only race with interrupts and NMIs on this CPU.
         * If we own the commit event, then we can commit
         * all others that interrupted us, since the interruptions
         * are in stack format (they finish before they come
         * back to us). This allows us to do a simple loop to
         * assign the commit to the tail.
         */
again:
        while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
                cpu_buffer->commit_page->page->commit =
                        cpu_buffer->commit_page->write;
                rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
                cpu_buffer->write_stamp =
                        cpu_buffer->commit_page->page->time_stamp;
                /* add barrier to keep gcc from optimizing too much */
                barrier();
        }
        while (rb_commit_index(cpu_buffer) !=
               rb_page_write(cpu_buffer->commit_page)) {
                cpu_buffer->commit_page->page->commit =
                        cpu_buffer->commit_page->write;
                barrier();
        }

        /* again, keep gcc from optimizing */
        barrier();

        /*
         * If an interrupt came in just after the first while loop
         * and pushed the tail page forward, we will be left with
         * a dangling commit that will never go forward.
         */
        if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
                goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
        cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

        /*
         * The iterator could be on the reader page (it starts there).
         * But the head could have moved, since the reader was
         * found. Check for this case and assign the iterator
         * to the head page instead of next.
         */
        if (iter->head_page == cpu_buffer->reader_page)
                iter->head_page = cpu_buffer->head_page;
        else
                rb_inc_page(cpu_buffer, &iter->head_page);

        iter->read_stamp = iter->head_page->page->time_stamp;
        iter->head = 0;
}

/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
                unsigned type, unsigned length)
{
        event->type = type;

        switch (type) {

        case RINGBUF_TYPE_PADDING:
                break;

        case RINGBUF_TYPE_TIME_EXTEND:
                event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
                break;

        case RINGBUF_TYPE_TIME_STAMP:
                event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
                break;

        case RINGBUF_TYPE_DATA:
                length -= RB_EVNT_HDR_SIZE;
                if (length > RB_MAX_SMALL_DATA) {
                        event->len = 0;
                        event->array[0] = length;
                } else
                        event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
                break;
        default:
                BUG();
        }
}

static unsigned rb_calculate_event_length(unsigned length)
{
        struct ring_buffer_event event; /* Used only for sizeof array */

        /* zero length can cause confusions */
        if (!length)
                length = 1;

        if (length > RB_MAX_SMALL_DATA)
                length += sizeof(event.array[0]);

        length += RB_EVNT_HDR_SIZE;
        length = ALIGN(length, RB_ALIGNMENT);

        return length;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                  unsigned type, unsigned long length, u64 *ts)
{
        struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
        unsigned long tail, write;
        struct ring_buffer *buffer = cpu_buffer->buffer;
        struct ring_buffer_event *event;
        unsigned long flags;
        bool lock_taken = false;

        commit_page = cpu_buffer->commit_page;
        /* we just need to protect against interrupts */
        barrier();
        tail_page = cpu_buffer->tail_page;
        write = local_add_return(length, &tail_page->write);
        tail = write - length;

        /* See if we shot past the end of this buffer page */
| 1006 | if (write > BUF_PAGE_SIZE) { |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1007 | struct buffer_page *next_page = tail_page; |
| 1008 | |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 1009 | local_irq_save(flags); |
Steven Rostedt | 78d904b | 2009-02-05 18:43:07 -0500 | [diff] [blame] | 1010 | /* |
Steven Rostedt | a81bd80 | 2009-02-06 01:45:16 -0500 | [diff] [blame] | 1011 | * Since the write to the buffer is still not |
| 1012 | * fully lockless, we must be careful with NMIs. |
| 1013 | * The locks in the writers are taken when a write |
| 1014 | * crosses to a new page. The locks protect against |
| 1015 | * races with the readers (this will soon be fixed |
| 1016 | * with a lockless solution). |
| 1017 | * |
| 1018 | * Because we can not protect against NMIs, and we |
| 1019 | * want to keep traces reentrant, we need to manage |
| 1020 | * what happens when we are in an NMI. |
| 1021 | * |
Steven Rostedt | 78d904b | 2009-02-05 18:43:07 -0500 | [diff] [blame] | 1022 | * NMIs can happen after we take the lock. |
| 1023 | * If we are in an NMI, only take the lock |
| 1024 | * if it is not already taken. Otherwise |
| 1025 | * simply fail. |
| 1026 | */ |
Steven Rostedt | a81bd80 | 2009-02-06 01:45:16 -0500 | [diff] [blame] | 1027 | if (unlikely(in_nmi())) { |
Steven Rostedt | 78d904b | 2009-02-05 18:43:07 -0500 | [diff] [blame] | 1028 | if (!__raw_spin_trylock(&cpu_buffer->lock)) |
Steven Rostedt | 45141d4 | 2009-02-12 13:19:48 -0500 | [diff] [blame] | 1029 | goto out_reset; |
Steven Rostedt | 78d904b | 2009-02-05 18:43:07 -0500 | [diff] [blame] | 1030 | } else |
| 1031 | __raw_spin_lock(&cpu_buffer->lock); |
| 1032 | |
| 1033 | lock_taken = true; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1034 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1035 | rb_inc_page(cpu_buffer, &next_page); |
| 1036 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1037 | head_page = cpu_buffer->head_page; |
| 1038 | reader_page = cpu_buffer->reader_page; |
| 1039 | |
| 1040 | /* we grabbed the lock before incrementing */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1041 | if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) |
Steven Rostedt | 45141d4 | 2009-02-12 13:19:48 -0500 | [diff] [blame] | 1042 | goto out_reset; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1043 | |
| 1044 | /* |
| 1045 | * If for some reason, we had an interrupt storm that made |
| 1046 | * it all the way around the buffer, bail, and warn |
| 1047 | * about it. |
| 1048 | */ |
Steven Rostedt | 98db8df | 2008-12-23 11:32:25 -0500 | [diff] [blame] | 1049 | if (unlikely(next_page == commit_page)) { |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1050 | WARN_ON_ONCE(1); |
Steven Rostedt | 45141d4 | 2009-02-12 13:19:48 -0500 | [diff] [blame] | 1051 | goto out_reset; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1052 | } |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1053 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1054 | if (next_page == head_page) { |
Lai Jiangshan | 6f3b344 | 2009-01-12 11:06:18 +0800 | [diff] [blame] | 1055 | if (!(buffer->flags & RB_FL_OVERWRITE)) |
Steven Rostedt | 45141d4 | 2009-02-12 13:19:48 -0500 | [diff] [blame] | 1056 | goto out_reset; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1057 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1058 | /* tail_page has not moved yet? */ |
| 1059 | if (tail_page == cpu_buffer->tail_page) { |
| 1060 | /* count overflows */ |
| 1061 | rb_update_overflow(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1062 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1063 | rb_inc_page(cpu_buffer, &head_page); |
| 1064 | cpu_buffer->head_page = head_page; |
| 1065 | cpu_buffer->head_page->read = 0; |
| 1066 | } |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1067 | } |
| 1068 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1069 | /* |
| 1070 | * If the tail page is still the same as what we think |
| 1071 | * it is, then it is up to us to update the tail |
| 1072 | * pointer. |
| 1073 | */ |
| 1074 | if (tail_page == cpu_buffer->tail_page) { |
| 1075 | local_set(&next_page->write, 0); |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1076 | local_set(&next_page->page->commit, 0); |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1077 | cpu_buffer->tail_page = next_page; |
| 1078 | |
| 1079 | /* reread the time stamp */ |
| 1080 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1081 | cpu_buffer->tail_page->page->time_stamp = *ts; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1082 | } |
| 1083 | |
| 1084 | /* |
| 1085 | * The actual tail page has moved forward. |
| 1086 | */ |
| 1087 | if (tail < BUF_PAGE_SIZE) { |
| 1088 | /* Mark the rest of the page with padding */ |
Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1089 | event = __rb_page_index(tail_page, tail); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1090 | event->type = RINGBUF_TYPE_PADDING; |
| 1091 | } |
| 1092 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1093 | if (tail <= BUF_PAGE_SIZE) |
| 1094 | /* Set the write back to the previous setting */ |
| 1095 | local_set(&tail_page->write, tail); |
| 1096 | |
| 1097 | /* |
| 1098 | 		 * If this was the commit entry, push the commit pointer |
| 1099 | 		 * up to the write position so it is not left behind |
| 1100 | */ |
| 1101 | if (tail_page == cpu_buffer->commit_page && |
| 1102 | tail == rb_commit_index(cpu_buffer)) { |
| 1103 | rb_set_commit_to_write(cpu_buffer); |
| 1104 | } |
| 1105 | |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 1106 | __raw_spin_unlock(&cpu_buffer->lock); |
| 1107 | local_irq_restore(flags); |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1108 | |
| 1109 | /* fail and let the caller try again */ |
| 1110 | return ERR_PTR(-EAGAIN); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1111 | } |
| 1112 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1113 | /* We reserved something on the buffer */ |
| 1114 | |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1115 | if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE)) |
| 1116 | return NULL; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1117 | |
Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1118 | event = __rb_page_index(tail_page, tail); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1119 | rb_update_event(event, type, length); |
| 1120 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1121 | /* |
| 1122 | * If this is a commit and the tail is zero, then update |
| 1123 | * this page's time stamp. |
| 1124 | */ |
| 1125 | if (!tail && rb_is_commit(cpu_buffer, event)) |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1126 | cpu_buffer->commit_page->page->time_stamp = *ts; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1127 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1128 | return event; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1129 | |
Steven Rostedt | 45141d4 | 2009-02-12 13:19:48 -0500 | [diff] [blame] | 1130 | out_reset: |
Lai Jiangshan | 6f3b344 | 2009-01-12 11:06:18 +0800 | [diff] [blame] | 1131 | /* reset write */ |
| 1132 | if (tail <= BUF_PAGE_SIZE) |
| 1133 | local_set(&tail_page->write, tail); |
| 1134 | |
Steven Rostedt | 78d904b | 2009-02-05 18:43:07 -0500 | [diff] [blame] | 1135 | if (likely(lock_taken)) |
| 1136 | __raw_spin_unlock(&cpu_buffer->lock); |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 1137 | local_irq_restore(flags); |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1138 | return NULL; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1139 | } |
| 1140 | |
| 1141 | static int |
| 1142 | rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, |
| 1143 | u64 *ts, u64 *delta) |
| 1144 | { |
| 1145 | struct ring_buffer_event *event; |
| 1146 | static int once; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1147 | int ret; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1148 | |
| 1149 | if (unlikely(*delta > (1ULL << 59) && !once++)) { |
| 1150 | printk(KERN_WARNING "Delta way too big! %llu" |
| 1151 | " ts=%llu write stamp = %llu\n", |
Stephen Rothwell | e2862c9 | 2008-10-27 17:43:28 +1100 | [diff] [blame] | 1152 | (unsigned long long)*delta, |
| 1153 | (unsigned long long)*ts, |
| 1154 | (unsigned long long)cpu_buffer->write_stamp); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1155 | WARN_ON(1); |
| 1156 | } |
| 1157 | |
| 1158 | /* |
| 1159 | 	 * The delta is too big, we need to add a |
| 1160 | * new timestamp. |
| 1161 | */ |
| 1162 | event = __rb_reserve_next(cpu_buffer, |
| 1163 | RINGBUF_TYPE_TIME_EXTEND, |
| 1164 | RB_LEN_TIME_EXTEND, |
| 1165 | ts); |
| 1166 | if (!event) |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1167 | return -EBUSY; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1168 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1169 | if (PTR_ERR(event) == -EAGAIN) |
| 1170 | return -EAGAIN; |
| 1171 | |
| 1172 | 	/* Only a committed time event can update the write stamp */ |
| 1173 | if (rb_is_commit(cpu_buffer, event)) { |
| 1174 | /* |
| 1175 | * If this is the first on the page, then we need to |
| 1176 | * update the page itself, and just put in a zero. |
| 1177 | */ |
| 1178 | if (rb_event_index(event)) { |
| 1179 | event->time_delta = *delta & TS_MASK; |
| 1180 | event->array[0] = *delta >> TS_SHIFT; |
| 1181 | } else { |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1182 | cpu_buffer->commit_page->page->time_stamp = *ts; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1183 | event->time_delta = 0; |
| 1184 | event->array[0] = 0; |
| 1185 | } |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1186 | cpu_buffer->write_stamp = *ts; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1187 | /* let the caller know this was the commit */ |
| 1188 | ret = 1; |
| 1189 | } else { |
| 1190 | /* Darn, this is just wasted space */ |
| 1191 | event->time_delta = 0; |
| 1192 | event->array[0] = 0; |
| 1193 | ret = 0; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1194 | } |
| 1195 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1196 | *delta = 0; |
| 1197 | |
| 1198 | return ret; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1199 | } |
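
/*
 * Note (added for clarity, not in the original file): the split performed
 * above is the inverse of what the read side does.  The low bits of the
 * oversized delta land in event->time_delta and the high bits in
 * event->array[0]; rb_update_read_stamp() and rb_update_iter_read_stamp()
 * rebuild the value as (array[0] << TS_SHIFT) + time_delta before adding
 * it to the read stamp.
 */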
| 1200 | |
| 1201 | static struct ring_buffer_event * |
| 1202 | rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, |
| 1203 | unsigned type, unsigned long length) |
| 1204 | { |
| 1205 | struct ring_buffer_event *event; |
| 1206 | u64 ts, delta; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1207 | int commit = 0; |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1208 | int nr_loops = 0; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1209 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1210 | again: |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1211 | /* |
| 1212 | * We allow for interrupts to reenter here and do a trace. |
| 1213 | * If one does, it will cause this original code to loop |
| 1214 | * back here. Even with heavy interrupts happening, this |
| 1215 | * should only happen a few times in a row. If this happens |
| 1216 | * 1000 times in a row, there must be either an interrupt |
| 1217 | 	 * storm or a bug. |
| 1218 | * Bail! |
| 1219 | */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1220 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1221 | return NULL; |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1222 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1223 | ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
| 1224 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1225 | /* |
| 1226 | * Only the first commit can update the timestamp. |
| 1227 | * Yes there is a race here. If an interrupt comes in |
| 1228 | * just after the conditional and it traces too, then it |
| 1229 | * will also check the deltas. More than one timestamp may |
| 1230 | * also be made. But only the entry that did the actual |
| 1231 | * commit will be something other than zero. |
| 1232 | */ |
| 1233 | if (cpu_buffer->tail_page == cpu_buffer->commit_page && |
| 1234 | rb_page_write(cpu_buffer->tail_page) == |
| 1235 | rb_commit_index(cpu_buffer)) { |
| 1236 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1237 | delta = ts - cpu_buffer->write_stamp; |
| 1238 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1239 | /* make sure this delta is calculated here */ |
| 1240 | barrier(); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1241 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1242 | /* Did the write stamp get updated already? */ |
| 1243 | if (unlikely(ts < cpu_buffer->write_stamp)) |
Steven Rostedt | 4143c5c | 2008-11-10 21:46:01 -0500 | [diff] [blame] | 1244 | delta = 0; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1245 | |
| 1246 | if (test_time_stamp(delta)) { |
| 1247 | |
| 1248 | commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); |
| 1249 | |
| 1250 | if (commit == -EBUSY) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1251 | return NULL; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1252 | |
| 1253 | if (commit == -EAGAIN) |
| 1254 | goto again; |
| 1255 | |
| 1256 | RB_WARN_ON(cpu_buffer, commit < 0); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1257 | } |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1258 | } else |
| 1259 | /* Non commits have zero deltas */ |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1260 | delta = 0; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1261 | |
| 1262 | event = __rb_reserve_next(cpu_buffer, type, length, &ts); |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1263 | if (PTR_ERR(event) == -EAGAIN) |
| 1264 | goto again; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1265 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1266 | if (!event) { |
| 1267 | if (unlikely(commit)) |
| 1268 | /* |
| 1269 | 			 * Ouch! We needed a timestamp and it was committed. But |
| 1270 | * we didn't get our event reserved. |
| 1271 | */ |
| 1272 | rb_set_commit_to_write(cpu_buffer); |
| 1273 | return NULL; |
| 1274 | } |
| 1275 | |
| 1276 | /* |
| 1277 | 	 * If the timestamp was committed, make the commit our entry |
| 1278 | * now so that we will update it when needed. |
| 1279 | */ |
| 1280 | if (commit) |
| 1281 | rb_set_commit_event(cpu_buffer, event); |
| 1282 | else if (!rb_is_commit(cpu_buffer, event)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1283 | delta = 0; |
| 1284 | |
| 1285 | event->time_delta = delta; |
| 1286 | |
| 1287 | return event; |
| 1288 | } |
| 1289 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1290 | static DEFINE_PER_CPU(int, rb_need_resched); |
| 1291 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1292 | /** |
| 1293 | * ring_buffer_lock_reserve - reserve a part of the buffer |
| 1294 | * @buffer: the ring buffer to reserve from |
| 1295 | * @length: the length of the data to reserve (excluding event header) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1296 | * |
| 1297 |  * Returns a reserved event on the ring buffer to copy directly to. |
| 1298 | * The user of this interface will need to get the body to write into |
| 1299 | * and can use the ring_buffer_event_data() interface. |
| 1300 | * |
| 1301 | * The length is the length of the data needed, not the event length |
| 1302 | * which also includes the event header. |
| 1303 | * |
| 1304 | * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. |
| 1305 | * If NULL is returned, then nothing has been allocated or locked. |
| 1306 | */ |
| 1307 | struct ring_buffer_event * |
Arnaldo Carvalho de Melo | 0a98775 | 2009-02-05 16:12:56 -0200 | [diff] [blame] | 1308 | ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1309 | { |
| 1310 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1311 | struct ring_buffer_event *event; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1312 | int cpu, resched; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1313 | |
Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 1314 | if (ring_buffer_flags != RB_BUFFERS_ON) |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 1315 | return NULL; |
| 1316 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1317 | if (atomic_read(&buffer->record_disabled)) |
| 1318 | return NULL; |
| 1319 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1320 | /* If we are tracing schedule, we don't want to recurse */ |
Steven Rostedt | 182e9f5 | 2008-11-03 23:15:56 -0500 | [diff] [blame] | 1321 | resched = ftrace_preempt_disable(); |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1322 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1323 | cpu = raw_smp_processor_id(); |
| 1324 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1325 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1326 | goto out; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1327 | |
| 1328 | cpu_buffer = buffer->buffers[cpu]; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1329 | |
| 1330 | if (atomic_read(&cpu_buffer->record_disabled)) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1331 | goto out; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1332 | |
| 1333 | length = rb_calculate_event_length(length); |
| 1334 | if (length > BUF_PAGE_SIZE) |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1335 | goto out; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1336 | |
| 1337 | event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length); |
| 1338 | if (!event) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1339 | goto out; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1340 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1341 | /* |
| 1342 | * Need to store resched state on this cpu. |
| 1343 | * Only the first needs to. |
| 1344 | */ |
| 1345 | |
| 1346 | if (preempt_count() == 1) |
| 1347 | per_cpu(rb_need_resched, cpu) = resched; |
| 1348 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1349 | return event; |
| 1350 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1351 | out: |
Steven Rostedt | 182e9f5 | 2008-11-03 23:15:56 -0500 | [diff] [blame] | 1352 | ftrace_preempt_enable(resched); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1353 | return NULL; |
| 1354 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1355 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1356 | |
| 1357 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, |
| 1358 | struct ring_buffer_event *event) |
| 1359 | { |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1360 | cpu_buffer->entries++; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1361 | |
| 1362 | /* Only process further if we own the commit */ |
| 1363 | if (!rb_is_commit(cpu_buffer, event)) |
| 1364 | return; |
| 1365 | |
| 1366 | cpu_buffer->write_stamp += event->time_delta; |
| 1367 | |
| 1368 | rb_set_commit_to_write(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1369 | } |
| 1370 | |
| 1371 | /** |
| 1372 |  * ring_buffer_unlock_commit - commit a reserved event |
| 1373 | * @buffer: The buffer to commit to |
| 1374 | * @event: The event pointer to commit. |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1375 | * |
| 1376 | * This commits the data to the ring buffer, and releases any locks held. |
| 1377 | * |
| 1378 | * Must be paired with ring_buffer_lock_reserve. |
| 1379 | */ |
| 1380 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, |
Arnaldo Carvalho de Melo | 0a98775 | 2009-02-05 16:12:56 -0200 | [diff] [blame] | 1381 | struct ring_buffer_event *event) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1382 | { |
| 1383 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1384 | int cpu = raw_smp_processor_id(); |
| 1385 | |
| 1386 | cpu_buffer = buffer->buffers[cpu]; |
| 1387 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1388 | rb_commit(cpu_buffer, event); |
| 1389 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1390 | /* |
| 1391 | * Only the last preempt count needs to restore preemption. |
| 1392 | */ |
Steven Rostedt | 182e9f5 | 2008-11-03 23:15:56 -0500 | [diff] [blame] | 1393 | if (preempt_count() == 1) |
| 1394 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); |
| 1395 | else |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1396 | preempt_enable_no_resched_notrace(); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1397 | |
| 1398 | return 0; |
| 1399 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1400 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); |
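
/*
 * Illustrative sketch (not part of the original file): the reserve/commit
 * pairing described in the kernel-doc above.  The caller copies its data
 * into the reserved event body and then commits; a NULL reservation means
 * nothing was allocated and nothing needs to be committed.
 */
static inline int example_reserve_and_commit(struct ring_buffer *buffer,
					     void *data, unsigned long len)
{
	struct ring_buffer_event *event;
	void *body;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return -EBUSY;		/* buffers disabled or no room */

	body = ring_buffer_event_data(event);
	memcpy(body, data, len);

	return ring_buffer_unlock_commit(buffer, event);
}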
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1401 | |
| 1402 | /** |
| 1403 | * ring_buffer_write - write data to the buffer without reserving |
| 1404 | * @buffer: The ring buffer to write to. |
| 1405 | * @length: The length of the data being written (excluding the event header) |
| 1406 | * @data: The data to write to the buffer. |
| 1407 | * |
| 1408 | * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as |
| 1409 | * one function. If you already have the data to write to the buffer, it |
| 1410 | * may be easier to simply call this function. |
| 1411 | * |
| 1412 | * Note, like ring_buffer_lock_reserve, the length is the length of the data |
| 1413 | * and not the length of the event which would hold the header. |
| 1414 | */ |
| 1415 | int ring_buffer_write(struct ring_buffer *buffer, |
| 1416 | unsigned long length, |
| 1417 | void *data) |
| 1418 | { |
| 1419 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1420 | struct ring_buffer_event *event; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1421 | unsigned long event_length; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1422 | void *body; |
| 1423 | int ret = -EBUSY; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1424 | int cpu, resched; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1425 | |
Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 1426 | if (ring_buffer_flags != RB_BUFFERS_ON) |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 1427 | return -EBUSY; |
| 1428 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1429 | if (atomic_read(&buffer->record_disabled)) |
| 1430 | return -EBUSY; |
| 1431 | |
Steven Rostedt | 182e9f5 | 2008-11-03 23:15:56 -0500 | [diff] [blame] | 1432 | resched = ftrace_preempt_disable(); |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1433 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1434 | cpu = raw_smp_processor_id(); |
| 1435 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1436 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1437 | goto out; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1438 | |
| 1439 | cpu_buffer = buffer->buffers[cpu]; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1440 | |
| 1441 | if (atomic_read(&cpu_buffer->record_disabled)) |
| 1442 | goto out; |
| 1443 | |
| 1444 | event_length = rb_calculate_event_length(length); |
| 1445 | event = rb_reserve_next_event(cpu_buffer, |
| 1446 | RINGBUF_TYPE_DATA, event_length); |
| 1447 | if (!event) |
| 1448 | goto out; |
| 1449 | |
| 1450 | body = rb_event_data(event); |
| 1451 | |
| 1452 | memcpy(body, data, length); |
| 1453 | |
| 1454 | rb_commit(cpu_buffer, event); |
| 1455 | |
| 1456 | ret = 0; |
| 1457 | out: |
Steven Rostedt | 182e9f5 | 2008-11-03 23:15:56 -0500 | [diff] [blame] | 1458 | ftrace_preempt_enable(resched); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1459 | |
| 1460 | return ret; |
| 1461 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1462 | EXPORT_SYMBOL_GPL(ring_buffer_write); |
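
/*
 * Illustrative sketch (not part of the original file): the one-shot form
 * documented above, for callers that already have the payload ready and
 * do not need to build it in place.
 */
static inline int example_write_blob(struct ring_buffer *buffer)
{
	char msg[] = "sample event payload";

	return ring_buffer_write(buffer, sizeof(msg), msg);
}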
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1463 | |
Andrew Morton | 34a148b | 2009-01-09 12:27:09 -0800 | [diff] [blame] | 1464 | static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1465 | { |
| 1466 | struct buffer_page *reader = cpu_buffer->reader_page; |
| 1467 | struct buffer_page *head = cpu_buffer->head_page; |
| 1468 | struct buffer_page *commit = cpu_buffer->commit_page; |
| 1469 | |
| 1470 | return reader->read == rb_page_commit(reader) && |
| 1471 | (commit == reader || |
| 1472 | (commit == head && |
| 1473 | head->read == rb_page_commit(commit))); |
| 1474 | } |
| 1475 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1476 | /** |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1477 | * ring_buffer_record_disable - stop all writes into the buffer |
| 1478 | * @buffer: The ring buffer to stop writes to. |
| 1479 | * |
| 1480 | * This prevents all writes to the buffer. Any attempt to write |
| 1481 | * to the buffer after this will fail and return NULL. |
| 1482 | * |
| 1483 | * The caller should call synchronize_sched() after this. |
| 1484 | */ |
| 1485 | void ring_buffer_record_disable(struct ring_buffer *buffer) |
| 1486 | { |
| 1487 | atomic_inc(&buffer->record_disabled); |
| 1488 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1489 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1490 | |
| 1491 | /** |
| 1492 | * ring_buffer_record_enable - enable writes to the buffer |
| 1493 | * @buffer: The ring buffer to enable writes |
| 1494 | * |
| 1495 | * Note, multiple disables will need the same number of enables |
| 1496 |  * to truly enable the writing (much like preempt_disable). |
| 1497 | */ |
| 1498 | void ring_buffer_record_enable(struct ring_buffer *buffer) |
| 1499 | { |
| 1500 | atomic_dec(&buffer->record_disabled); |
| 1501 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1502 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); |
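
/*
 * Illustrative sketch (not part of the original file): pairing the
 * disable/enable calls around a quiescent read, including the
 * synchronize_sched() that the kernel-doc above asks for so that writers
 * already inside the buffer can finish before the reader proceeds.
 */
static inline void example_quiesce_and_read(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();

	/* ... read or reset the buffer safely here ... */

	ring_buffer_record_enable(buffer);
}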
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1503 | |
| 1504 | /** |
| 1505 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer |
| 1506 | * @buffer: The ring buffer to stop writes to. |
| 1507 | * @cpu: The CPU buffer to stop |
| 1508 | * |
| 1509 | * This prevents all writes to the buffer. Any attempt to write |
| 1510 | * to the buffer after this will fail and return NULL. |
| 1511 | * |
| 1512 | * The caller should call synchronize_sched() after this. |
| 1513 | */ |
| 1514 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) |
| 1515 | { |
| 1516 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1517 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1518 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1519 | return; |
| 1520 | |
| 1521 | cpu_buffer = buffer->buffers[cpu]; |
| 1522 | atomic_inc(&cpu_buffer->record_disabled); |
| 1523 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1524 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1525 | |
| 1526 | /** |
| 1527 | * ring_buffer_record_enable_cpu - enable writes to the buffer |
| 1528 | * @buffer: The ring buffer to enable writes |
| 1529 | * @cpu: The CPU to enable. |
| 1530 | * |
| 1531 | * Note, multiple disables will need the same number of enables |
| 1532 |  * to truly enable the writing (much like preempt_disable). |
| 1533 | */ |
| 1534 | void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) |
| 1535 | { |
| 1536 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1537 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1538 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1539 | return; |
| 1540 | |
| 1541 | cpu_buffer = buffer->buffers[cpu]; |
| 1542 | atomic_dec(&cpu_buffer->record_disabled); |
| 1543 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1544 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1545 | |
| 1546 | /** |
| 1547 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer |
| 1548 | * @buffer: The ring buffer |
| 1549 | * @cpu: The per CPU buffer to get the entries from. |
| 1550 | */ |
| 1551 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) |
| 1552 | { |
| 1553 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1554 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1555 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1556 | return 0; |
| 1557 | |
| 1558 | cpu_buffer = buffer->buffers[cpu]; |
| 1559 | return cpu_buffer->entries; |
| 1560 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1561 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1562 | |
| 1563 | /** |
| 1564 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer |
| 1565 | * @buffer: The ring buffer |
| 1566 | * @cpu: The per CPU buffer to get the number of overruns from |
| 1567 | */ |
| 1568 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) |
| 1569 | { |
| 1570 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1571 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1572 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1573 | return 0; |
| 1574 | |
| 1575 | cpu_buffer = buffer->buffers[cpu]; |
| 1576 | return cpu_buffer->overrun; |
| 1577 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1578 | EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1579 | |
| 1580 | /** |
| 1581 | * ring_buffer_entries - get the number of entries in a buffer |
| 1582 | * @buffer: The ring buffer |
| 1583 | * |
| 1584 | * Returns the total number of entries in the ring buffer |
| 1585 | * (all CPU entries) |
| 1586 | */ |
| 1587 | unsigned long ring_buffer_entries(struct ring_buffer *buffer) |
| 1588 | { |
| 1589 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1590 | unsigned long entries = 0; |
| 1591 | int cpu; |
| 1592 | |
| 1593 | /* if you care about this being correct, lock the buffer */ |
| 1594 | for_each_buffer_cpu(buffer, cpu) { |
| 1595 | cpu_buffer = buffer->buffers[cpu]; |
| 1596 | entries += cpu_buffer->entries; |
| 1597 | } |
| 1598 | |
| 1599 | return entries; |
| 1600 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1601 | EXPORT_SYMBOL_GPL(ring_buffer_entries); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1602 | |
| 1603 | /** |
| 1604 |  * ring_buffer_overruns - get the number of overruns in the buffer |
| 1605 | * @buffer: The ring buffer |
| 1606 | * |
| 1607 | * Returns the total number of overruns in the ring buffer |
| 1608 | * (all CPU entries) |
| 1609 | */ |
| 1610 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer) |
| 1611 | { |
| 1612 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1613 | unsigned long overruns = 0; |
| 1614 | int cpu; |
| 1615 | |
| 1616 | /* if you care about this being correct, lock the buffer */ |
| 1617 | for_each_buffer_cpu(buffer, cpu) { |
| 1618 | cpu_buffer = buffer->buffers[cpu]; |
| 1619 | overruns += cpu_buffer->overrun; |
| 1620 | } |
| 1621 | |
| 1622 | return overruns; |
| 1623 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1624 | EXPORT_SYMBOL_GPL(ring_buffer_overruns); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1625 | |
Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 1626 | static void rb_iter_reset(struct ring_buffer_iter *iter) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1627 | { |
| 1628 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| 1629 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1630 | /* Iterator usage is expected to have record disabled */ |
| 1631 | if (list_empty(&cpu_buffer->reader_page->list)) { |
| 1632 | iter->head_page = cpu_buffer->head_page; |
Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1633 | iter->head = cpu_buffer->head_page->read; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1634 | } else { |
| 1635 | iter->head_page = cpu_buffer->reader_page; |
Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1636 | iter->head = cpu_buffer->reader_page->read; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1637 | } |
| 1638 | if (iter->head) |
| 1639 | iter->read_stamp = cpu_buffer->read_stamp; |
| 1640 | else |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1641 | iter->read_stamp = iter->head_page->page->time_stamp; |
Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 1642 | } |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 1643 | |
Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 1644 | /** |
| 1645 | * ring_buffer_iter_reset - reset an iterator |
| 1646 | * @iter: The iterator to reset |
| 1647 | * |
| 1648 | * Resets the iterator, so that it will start from the beginning |
| 1649 | * again. |
| 1650 | */ |
| 1651 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) |
| 1652 | { |
| 1653 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| 1654 | unsigned long flags; |
| 1655 | |
| 1656 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 1657 | rb_iter_reset(iter); |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 1658 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1659 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1660 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1661 | |
| 1662 | /** |
| 1663 | * ring_buffer_iter_empty - check if an iterator has no more to read |
| 1664 | * @iter: The iterator to check |
| 1665 | */ |
| 1666 | int ring_buffer_iter_empty(struct ring_buffer_iter *iter) |
| 1667 | { |
| 1668 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1669 | |
| 1670 | cpu_buffer = iter->cpu_buffer; |
| 1671 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1672 | return iter->head_page == cpu_buffer->commit_page && |
| 1673 | iter->head == rb_commit_index(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1674 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1675 | EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1676 | |
| 1677 | static void |
| 1678 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, |
| 1679 | struct ring_buffer_event *event) |
| 1680 | { |
| 1681 | u64 delta; |
| 1682 | |
| 1683 | switch (event->type) { |
| 1684 | case RINGBUF_TYPE_PADDING: |
| 1685 | return; |
| 1686 | |
| 1687 | case RINGBUF_TYPE_TIME_EXTEND: |
| 1688 | delta = event->array[0]; |
| 1689 | delta <<= TS_SHIFT; |
| 1690 | delta += event->time_delta; |
| 1691 | cpu_buffer->read_stamp += delta; |
| 1692 | return; |
| 1693 | |
| 1694 | case RINGBUF_TYPE_TIME_STAMP: |
| 1695 | /* FIXME: not implemented */ |
| 1696 | return; |
| 1697 | |
| 1698 | case RINGBUF_TYPE_DATA: |
| 1699 | cpu_buffer->read_stamp += event->time_delta; |
| 1700 | return; |
| 1701 | |
| 1702 | default: |
| 1703 | BUG(); |
| 1704 | } |
| 1705 | return; |
| 1706 | } |
| 1707 | |
| 1708 | static void |
| 1709 | rb_update_iter_read_stamp(struct ring_buffer_iter *iter, |
| 1710 | struct ring_buffer_event *event) |
| 1711 | { |
| 1712 | u64 delta; |
| 1713 | |
| 1714 | switch (event->type) { |
| 1715 | case RINGBUF_TYPE_PADDING: |
| 1716 | return; |
| 1717 | |
| 1718 | case RINGBUF_TYPE_TIME_EXTEND: |
| 1719 | delta = event->array[0]; |
| 1720 | delta <<= TS_SHIFT; |
| 1721 | delta += event->time_delta; |
| 1722 | iter->read_stamp += delta; |
| 1723 | return; |
| 1724 | |
| 1725 | case RINGBUF_TYPE_TIME_STAMP: |
| 1726 | /* FIXME: not implemented */ |
| 1727 | return; |
| 1728 | |
| 1729 | case RINGBUF_TYPE_DATA: |
| 1730 | iter->read_stamp += event->time_delta; |
| 1731 | return; |
| 1732 | |
| 1733 | default: |
| 1734 | BUG(); |
| 1735 | } |
| 1736 | return; |
| 1737 | } |
| 1738 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1739 | static struct buffer_page * |
| 1740 | rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1741 | { |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1742 | struct buffer_page *reader = NULL; |
| 1743 | unsigned long flags; |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1744 | int nr_loops = 0; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1745 | |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 1746 | local_irq_save(flags); |
| 1747 | __raw_spin_lock(&cpu_buffer->lock); |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1748 | |
| 1749 | again: |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1750 | /* |
| 1751 | * This should normally only loop twice. But because the |
| 1752 | * start of the reader inserts an empty page, it causes |
| 1753 | * a case where we will loop three times. There should be no |
| 1754 | * reason to loop four times (that I know of). |
| 1755 | */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1756 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1757 | reader = NULL; |
| 1758 | goto out; |
| 1759 | } |
| 1760 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1761 | reader = cpu_buffer->reader_page; |
| 1762 | |
| 1763 | /* If there's more to read, return this page */ |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1764 | if (cpu_buffer->reader_page->read < rb_page_size(reader)) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1765 | goto out; |
| 1766 | |
| 1767 | /* Never should we have an index greater than the size */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1768 | if (RB_WARN_ON(cpu_buffer, |
| 1769 | cpu_buffer->reader_page->read > rb_page_size(reader))) |
| 1770 | goto out; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1771 | |
| 1772 | /* check if we caught up to the tail */ |
| 1773 | reader = NULL; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1774 | if (cpu_buffer->commit_page == cpu_buffer->reader_page) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1775 | goto out; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1776 | |
| 1777 | /* |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1778 | * Splice the empty reader page into the list around the head. |
| 1779 | * Reset the reader page to size zero. |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1780 | */ |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1781 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1782 | reader = cpu_buffer->head_page; |
| 1783 | cpu_buffer->reader_page->list.next = reader->list.next; |
| 1784 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1785 | |
| 1786 | local_set(&cpu_buffer->reader_page->write, 0); |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 1787 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1788 | |
| 1789 | /* Make the reader page now replace the head */ |
| 1790 | reader->list.prev->next = &cpu_buffer->reader_page->list; |
| 1791 | reader->list.next->prev = &cpu_buffer->reader_page->list; |
| 1792 | |
| 1793 | /* |
| 1794 | * If the tail is on the reader, then we must set the head |
| 1795 | * to the inserted page, otherwise we set it one before. |
| 1796 | */ |
| 1797 | cpu_buffer->head_page = cpu_buffer->reader_page; |
| 1798 | |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1799 | if (cpu_buffer->commit_page != reader) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1800 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); |
| 1801 | |
| 1802 | /* Finally update the reader page to the new head */ |
| 1803 | cpu_buffer->reader_page = reader; |
| 1804 | rb_reset_reader_page(cpu_buffer); |
| 1805 | |
| 1806 | goto again; |
| 1807 | |
| 1808 | out: |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 1809 | __raw_spin_unlock(&cpu_buffer->lock); |
| 1810 | local_irq_restore(flags); |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1811 | |
| 1812 | return reader; |
| 1813 | } |
| 1814 | |
| 1815 | static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) |
| 1816 | { |
| 1817 | struct ring_buffer_event *event; |
| 1818 | struct buffer_page *reader; |
| 1819 | unsigned length; |
| 1820 | |
| 1821 | reader = rb_get_reader_page(cpu_buffer); |
| 1822 | |
| 1823 | /* This function should not be called when buffer is empty */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1824 | if (RB_WARN_ON(cpu_buffer, !reader)) |
| 1825 | return; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1826 | |
| 1827 | event = rb_reader_event(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1828 | |
| 1829 | if (event->type == RINGBUF_TYPE_DATA) |
| 1830 | cpu_buffer->entries--; |
| 1831 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1832 | rb_update_read_stamp(cpu_buffer, event); |
| 1833 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1834 | length = rb_event_length(event); |
Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 1835 | cpu_buffer->reader_page->read += length; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1836 | } |
| 1837 | |
| 1838 | static void rb_advance_iter(struct ring_buffer_iter *iter) |
| 1839 | { |
| 1840 | struct ring_buffer *buffer; |
| 1841 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1842 | struct ring_buffer_event *event; |
| 1843 | unsigned length; |
| 1844 | |
| 1845 | cpu_buffer = iter->cpu_buffer; |
| 1846 | buffer = cpu_buffer->buffer; |
| 1847 | |
| 1848 | /* |
| 1849 | * Check if we are at the end of the buffer. |
| 1850 | */ |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1851 | if (iter->head >= rb_page_size(iter->head_page)) { |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1852 | if (RB_WARN_ON(buffer, |
| 1853 | iter->head_page == cpu_buffer->commit_page)) |
| 1854 | return; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1855 | rb_inc_iter(iter); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1856 | return; |
| 1857 | } |
| 1858 | |
| 1859 | event = rb_iter_head_event(iter); |
| 1860 | |
| 1861 | length = rb_event_length(event); |
| 1862 | |
| 1863 | /* |
| 1864 | * This should not be called to advance the header if we are |
| 1865 | * at the tail of the buffer. |
| 1866 | */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1867 | if (RB_WARN_ON(cpu_buffer, |
Steven Rostedt | f536aaf | 2008-11-10 23:07:30 -0500 | [diff] [blame] | 1868 | (iter->head_page == cpu_buffer->commit_page) && |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1869 | (iter->head + length > rb_commit_index(cpu_buffer)))) |
| 1870 | return; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1871 | |
| 1872 | rb_update_iter_read_stamp(iter, event); |
| 1873 | |
| 1874 | iter->head += length; |
| 1875 | |
| 1876 | /* check for end of page padding */ |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1877 | if ((iter->head >= rb_page_size(iter->head_page)) && |
| 1878 | (iter->head_page != cpu_buffer->commit_page)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1879 | rb_advance_iter(iter); |
| 1880 | } |
| 1881 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 1882 | static struct ring_buffer_event * |
| 1883 | rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1884 | { |
| 1885 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1886 | struct ring_buffer_event *event; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1887 | struct buffer_page *reader; |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1888 | int nr_loops = 0; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1889 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 1890 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1891 | return NULL; |
| 1892 | |
| 1893 | cpu_buffer = buffer->buffers[cpu]; |
| 1894 | |
| 1895 | again: |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1896 | /* |
| 1897 | * We repeat when a timestamp is encountered. It is possible |
| 1898 | * to get multiple timestamps from an interrupt entering just |
| 1899 | * as one timestamp is about to be written. The max times |
| 1900 | * that this can happen is the number of nested interrupts we |
| 1901 | * can have. Nesting 10 deep of interrupts is clearly |
| 1902 | * an anomaly. |
| 1903 | */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1904 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1905 | return NULL; |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1906 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1907 | reader = rb_get_reader_page(cpu_buffer); |
| 1908 | if (!reader) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1909 | return NULL; |
| 1910 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1911 | event = rb_reader_event(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1912 | |
| 1913 | switch (event->type) { |
| 1914 | case RINGBUF_TYPE_PADDING: |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 1915 | RB_WARN_ON(cpu_buffer, 1); |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1916 | rb_advance_reader(cpu_buffer); |
| 1917 | return NULL; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1918 | |
| 1919 | case RINGBUF_TYPE_TIME_EXTEND: |
| 1920 | /* Internal data, OK to advance */ |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1921 | rb_advance_reader(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1922 | goto again; |
| 1923 | |
| 1924 | case RINGBUF_TYPE_TIME_STAMP: |
| 1925 | /* FIXME: not implemented */ |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1926 | rb_advance_reader(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1927 | goto again; |
| 1928 | |
| 1929 | case RINGBUF_TYPE_DATA: |
| 1930 | if (ts) { |
| 1931 | *ts = cpu_buffer->read_stamp + event->time_delta; |
| 1932 | ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); |
| 1933 | } |
| 1934 | return event; |
| 1935 | |
| 1936 | default: |
| 1937 | BUG(); |
| 1938 | } |
| 1939 | |
| 1940 | return NULL; |
| 1941 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 1942 | EXPORT_SYMBOL_GPL(ring_buffer_peek); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1943 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 1944 | static struct ring_buffer_event * |
| 1945 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1946 | { |
| 1947 | struct ring_buffer *buffer; |
| 1948 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1949 | struct ring_buffer_event *event; |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1950 | int nr_loops = 0; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1951 | |
| 1952 | if (ring_buffer_iter_empty(iter)) |
| 1953 | return NULL; |
| 1954 | |
| 1955 | cpu_buffer = iter->cpu_buffer; |
| 1956 | buffer = cpu_buffer->buffer; |
| 1957 | |
| 1958 | again: |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1959 | /* |
| 1960 | * We repeat when a timestamp is encountered. It is possible |
| 1961 | * to get multiple timestamps from an interrupt entering just |
| 1962 | * as one timestamp is about to be written. The max times |
| 1963 | * that this can happen is the number of nested interrupts we |
| 1964 | * can have. Nesting 10 deep of interrupts is clearly |
| 1965 | * an anomaly. |
| 1966 | */ |
Steven Rostedt | 3e89c7b | 2008-11-11 15:28:41 -0500 | [diff] [blame] | 1967 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1968 | return NULL; |
Steven Rostedt | 818e3dd | 2008-10-31 09:58:35 -0400 | [diff] [blame] | 1969 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1970 | if (rb_per_cpu_empty(cpu_buffer)) |
| 1971 | return NULL; |
| 1972 | |
| 1973 | event = rb_iter_head_event(iter); |
| 1974 | |
| 1975 | switch (event->type) { |
| 1976 | case RINGBUF_TYPE_PADDING: |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 1977 | rb_inc_iter(iter); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 1978 | goto again; |
| 1979 | |
| 1980 | case RINGBUF_TYPE_TIME_EXTEND: |
| 1981 | /* Internal data, OK to advance */ |
| 1982 | rb_advance_iter(iter); |
| 1983 | goto again; |
| 1984 | |
| 1985 | case RINGBUF_TYPE_TIME_STAMP: |
| 1986 | /* FIXME: not implemented */ |
| 1987 | rb_advance_iter(iter); |
| 1988 | goto again; |
| 1989 | |
| 1990 | case RINGBUF_TYPE_DATA: |
| 1991 | if (ts) { |
| 1992 | *ts = iter->read_stamp + event->time_delta; |
| 1993 | ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); |
| 1994 | } |
| 1995 | return event; |
| 1996 | |
| 1997 | default: |
| 1998 | BUG(); |
| 1999 | } |
| 2000 | |
| 2001 | return NULL; |
| 2002 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2003 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2004 | |
| 2005 | /** |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2006 | * ring_buffer_peek - peek at the next event to be read |
| 2007 | * @buffer: The ring buffer to read |
| 2008 |  * @cpu: The cpu to peek at |
| 2009 | * @ts: The timestamp counter of this event. |
| 2010 | * |
| 2011 | * This will return the event that will be read next, but does |
| 2012 | * not consume the data. |
| 2013 | */ |
| 2014 | struct ring_buffer_event * |
| 2015 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) |
| 2016 | { |
| 2017 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
| 2018 | struct ring_buffer_event *event; |
| 2019 | unsigned long flags; |
| 2020 | |
| 2021 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 2022 | event = rb_buffer_peek(buffer, cpu, ts); |
| 2023 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 2024 | |
| 2025 | return event; |
| 2026 | } |
| 2027 | |
| 2028 | /** |
| 2029 | * ring_buffer_iter_peek - peek at the next event to be read |
| 2030 | * @iter: The ring buffer iterator |
| 2031 | * @ts: The timestamp counter of this event. |
| 2032 | * |
| 2033 | * This will return the event that will be read next, but does |
| 2034 | * not increment the iterator. |
| 2035 | */ |
| 2036 | struct ring_buffer_event * |
| 2037 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
| 2038 | { |
| 2039 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| 2040 | struct ring_buffer_event *event; |
| 2041 | unsigned long flags; |
| 2042 | |
| 2043 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 2044 | event = rb_iter_peek(iter, ts); |
| 2045 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 2046 | |
| 2047 | return event; |
| 2048 | } |
| 2049 | |
| 2050 | /** |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2051 | * ring_buffer_consume - return an event and consume it |
| 2052 | * @buffer: The ring buffer to get the next event from |
| 2053 | * |
| 2054 | * Returns the next event in the ring buffer, and that event is consumed. |
| 2055 |  * Meaning that sequential reads will keep returning a different event, |
| 2056 | * and eventually empty the ring buffer if the producer is slower. |
| 2057 | */ |
| 2058 | struct ring_buffer_event * |
| 2059 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) |
| 2060 | { |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2061 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2062 | struct ring_buffer_event *event; |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2063 | unsigned long flags; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2064 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2065 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2066 | return NULL; |
| 2067 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2068 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2069 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2070 | event = rb_buffer_peek(buffer, cpu, ts); |
| 2071 | if (!event) |
| 2072 | goto out; |
| 2073 | |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2074 | rb_advance_reader(cpu_buffer); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2075 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2076 | out: |
| 2077 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 2078 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2079 | return event; |
| 2080 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2081 | EXPORT_SYMBOL_GPL(ring_buffer_consume); |
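
/*
 * Illustrative sketch (not part of the original file): draining one CPU's
 * buffer with the consuming read above.  Each call hands back the next
 * event and its normalized timestamp until the buffer runs empty.
 */
static inline unsigned long example_drain_cpu(struct ring_buffer *buffer,
					      int cpu)
{
	struct ring_buffer_event *event;
	unsigned long count = 0;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
		/* ring_buffer_event_data(event) would yield the payload */
		count++;
	}

	return count;
}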
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2082 | |
| 2083 | /** |
| 2084 | * ring_buffer_read_start - start a non consuming read of the buffer |
| 2085 | * @buffer: The ring buffer to read from |
| 2086 | * @cpu: The cpu buffer to iterate over |
| 2087 | * |
| 2088 | * This starts up an iteration through the buffer. It also disables |
| 2089 | * the recording to the buffer until the reading is finished. |
| 2090 | * This prevents the reading from being corrupted. This is not |
| 2091 | * a consuming read, so a producer is not expected. |
| 2092 | * |
| 2093 |  * Must be paired with ring_buffer_read_finish. |
| 2094 | */ |
| 2095 | struct ring_buffer_iter * |
| 2096 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu) |
| 2097 | { |
| 2098 | struct ring_buffer_per_cpu *cpu_buffer; |
| 2099 | struct ring_buffer_iter *iter; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2100 | unsigned long flags; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2101 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2102 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2103 | return NULL; |
| 2104 | |
| 2105 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); |
| 2106 | if (!iter) |
| 2107 | return NULL; |
| 2108 | |
| 2109 | cpu_buffer = buffer->buffers[cpu]; |
| 2110 | |
| 2111 | iter->cpu_buffer = cpu_buffer; |
| 2112 | |
| 2113 | atomic_inc(&cpu_buffer->record_disabled); |
| 2114 | synchronize_sched(); |
| 2115 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2116 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 2117 | __raw_spin_lock(&cpu_buffer->lock); |
Steven Rostedt | 642edba | 2008-11-12 00:01:26 -0500 | [diff] [blame] | 2118 | rb_iter_reset(iter); |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 2119 | __raw_spin_unlock(&cpu_buffer->lock); |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2120 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2121 | |
| 2122 | return iter; |
| 2123 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2124 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2125 | |
| 2126 | /** |
| 2127 | * ring_buffer_read_finish - finish reading the iterator of the buffer |
| 2128 | * @iter: The iterator retrieved by ring_buffer_read_start |
| 2129 | * |
| 2130 | * This re-enables the recording to the buffer, and frees the |
| 2131 | * iterator. |
| 2132 | */ |
| 2133 | void |
| 2134 | ring_buffer_read_finish(struct ring_buffer_iter *iter) |
| 2135 | { |
| 2136 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| 2137 | |
| 2138 | atomic_dec(&cpu_buffer->record_disabled); |
| 2139 | kfree(iter); |
| 2140 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2141 | EXPORT_SYMBOL_GPL(ring_buffer_read_finish); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2142 | |
| 2143 | /** |
| 2144 | * ring_buffer_read - read the next item in the ring buffer by the iterator |
| 2145 | * @iter: The ring buffer iterator |
| 2146 | * @ts: The time stamp of the event read. |
| 2147 | * |
| 2148 | * This reads the next event in the ring buffer and increments the iterator. |
| 2149 | */ |
| 2150 | struct ring_buffer_event * |
| 2151 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) |
| 2152 | { |
| 2153 | struct ring_buffer_event *event; |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2154 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
| 2155 | unsigned long flags; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2156 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2157 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 2158 | event = rb_iter_peek(iter, ts); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2159 | if (!event) |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2160 | goto out; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2161 | |
| 2162 | rb_advance_iter(iter); |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2163 | out: |
| 2164 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2165 | |
| 2166 | return event; |
| 2167 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2168 | EXPORT_SYMBOL_GPL(ring_buffer_read); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2169 | |
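/*
 * Illustrative sketch (not part of the original file): a non consuming
 * read of one CPU buffer using the iterator API above.  dump_event() is
 * a hypothetical callback used only for this example.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)) != NULL)
 *		dump_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */
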
| 2170 | /** |
| 2171 | * ring_buffer_size - return the size of the ring buffer (in bytes) |
| 2172 | * @buffer: The ring buffer. |
| 2173 | */ |
| 2174 | unsigned long ring_buffer_size(struct ring_buffer *buffer) |
| 2175 | { |
| 2176 | return BUF_PAGE_SIZE * buffer->pages; |
| 2177 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2178 | EXPORT_SYMBOL_GPL(ring_buffer_size); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2179 | |
| 2180 | static void |
| 2181 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) |
| 2182 | { |
| 2183 | cpu_buffer->head_page |
| 2184 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2185 | local_set(&cpu_buffer->head_page->write, 0); |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 2186 | local_set(&cpu_buffer->head_page->page->commit, 0); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2187 | |
Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 2188 | cpu_buffer->head_page->read = 0; |
Steven Rostedt | bf41a15 | 2008-10-04 02:00:59 -0400 | [diff] [blame] | 2189 | |
| 2190 | cpu_buffer->tail_page = cpu_buffer->head_page; |
| 2191 | cpu_buffer->commit_page = cpu_buffer->head_page; |
| 2192 | |
| 2193 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
| 2194 | local_set(&cpu_buffer->reader_page->write, 0); |
Steven Rostedt | abc9b56 | 2008-12-02 15:34:06 -0500 | [diff] [blame] | 2195 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
Steven Rostedt | 6f807ac | 2008-10-04 02:00:58 -0400 | [diff] [blame] | 2196 | cpu_buffer->reader_page->read = 0; |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2197 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2198 | cpu_buffer->overrun = 0; |
| 2199 | cpu_buffer->entries = 0; |
Steven Rostedt | 69507c0 | 2009-01-21 18:45:57 -0500 | [diff] [blame] | 2200 | |
| 2201 | cpu_buffer->write_stamp = 0; |
| 2202 | cpu_buffer->read_stamp = 0; |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2203 | } |
| 2204 | |
| 2205 | /** |
| 2206 | * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer |
| 2207 | * @buffer: The ring buffer to reset a per cpu buffer of |
| 2208 | * @cpu: The CPU buffer to be reset |
| 2209 | */ |
| 2210 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) |
| 2211 | { |
| 2212 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
| 2213 | unsigned long flags; |
| 2214 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2215 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2216 | return; |
| 2217 | |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2218 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 2219 | |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 2220 | __raw_spin_lock(&cpu_buffer->lock); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2221 | |
| 2222 | rb_reset_cpu(cpu_buffer); |
| 2223 | |
Steven Rostedt | 3e03fb7 | 2008-11-06 00:09:43 -0500 | [diff] [blame] | 2224 | __raw_spin_unlock(&cpu_buffer->lock); |
Steven Rostedt | f83c9d0 | 2008-11-11 18:47:44 +0100 | [diff] [blame] | 2225 | |
| 2226 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2227 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2228 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2229 | |
| 2230 | /** |
| 2231 | * ring_buffer_reset - reset a ring buffer |
| 2232 | * @buffer: The ring buffer whose per cpu buffers are to be reset |
| 2233 | */ |
| 2234 | void ring_buffer_reset(struct ring_buffer *buffer) |
| 2235 | { |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2236 | int cpu; |
| 2237 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2238 | for_each_buffer_cpu(buffer, cpu) |
Steven Rostedt | d769041 | 2008-10-01 00:29:53 -0400 | [diff] [blame] | 2239 | ring_buffer_reset_cpu(buffer, cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2240 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2241 | EXPORT_SYMBOL_GPL(ring_buffer_reset); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2242 | |
| 2243 | /** |
| 2244 | * ring_buffer_empty - is the ring buffer empty? |
| 2245 | * @buffer: The ring buffer to test |
| 2246 | */ |
| 2247 | int ring_buffer_empty(struct ring_buffer *buffer) |
| 2248 | { |
| 2249 | struct ring_buffer_per_cpu *cpu_buffer; |
| 2250 | int cpu; |
| 2251 | |
| 2252 | /* yes this is racy, but if you don't like the race, lock the buffer */ |
| 2253 | for_each_buffer_cpu(buffer, cpu) { |
| 2254 | cpu_buffer = buffer->buffers[cpu]; |
| 2255 | if (!rb_per_cpu_empty(cpu_buffer)) |
| 2256 | return 0; |
| 2257 | } |
| 2258 | return 1; |
| 2259 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2260 | EXPORT_SYMBOL_GPL(ring_buffer_empty); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2261 | |
| 2262 | /** |
| 2263 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? |
| 2264 | * @buffer: The ring buffer |
| 2265 | * @cpu: The CPU buffer to test |
| 2266 | */ |
| 2267 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) |
| 2268 | { |
| 2269 | struct ring_buffer_per_cpu *cpu_buffer; |
| 2270 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2271 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2272 | return 1; |
| 2273 | |
| 2274 | cpu_buffer = buffer->buffers[cpu]; |
| 2275 | return rb_per_cpu_empty(cpu_buffer); |
| 2276 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2277 | EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2278 | |
| 2279 | /** |
| 2280 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers |
| 2281 | * @buffer_a: One buffer to swap with |
| 2282 | * @buffer_b: The other buffer to swap with |
| 2283 | * |
| 2284 | * This function is useful for tracers that want to take a "snapshot" |
| 2285 | * of a CPU buffer and have another backup buffer lying around. |
| 2286 | * It is expected that the tracer handles the cpu buffer not being |
| 2287 | * used at the moment. |
| 2288 | */ |
| 2289 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, |
| 2290 | struct ring_buffer *buffer_b, int cpu) |
| 2291 | { |
| 2292 | struct ring_buffer_per_cpu *cpu_buffer_a; |
| 2293 | struct ring_buffer_per_cpu *cpu_buffer_b; |
| 2294 | |
Rusty Russell | 9e01c1b | 2009-01-01 10:12:22 +1030 | [diff] [blame] | 2295 | if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || |
| 2296 | !cpumask_test_cpu(cpu, buffer_b->cpumask)) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2297 | return -EINVAL; |
| 2298 | |
| 2299 | /* At least make sure the two buffers are somewhat the same */ |
Lai Jiangshan | 6d102bc | 2008-12-17 17:48:23 +0800 | [diff] [blame] | 2300 | if (buffer_a->pages != buffer_b->pages) |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2301 | return -EINVAL; |
| 2302 | |
Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 2303 | if (ring_buffer_flags != RB_BUFFERS_ON) |
| 2304 | return -EAGAIN; |
| 2305 | |
| 2306 | if (atomic_read(&buffer_a->record_disabled)) |
| 2307 | return -EAGAIN; |
| 2308 | |
| 2309 | if (atomic_read(&buffer_b->record_disabled)) |
| 2310 | return -EAGAIN; |
| 2311 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2312 | cpu_buffer_a = buffer_a->buffers[cpu]; |
| 2313 | cpu_buffer_b = buffer_b->buffers[cpu]; |
| 2314 | |
Steven Rostedt | 97b17ef | 2009-01-21 15:24:56 -0500 | [diff] [blame] | 2315 | if (atomic_read(&cpu_buffer_a->record_disabled)) |
| 2316 | return -EAGAIN; |
| 2317 | |
| 2318 | if (atomic_read(&cpu_buffer_b->record_disabled)) |
| 2319 | return -EAGAIN; |
| 2320 | |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2321 | /* |
| 2322 | * We can't do a synchronize_sched here because this |
| 2323 | * function can be called in atomic context. |
| 2324 | * Normally this will be called from the same CPU as cpu. |
| 2325 | * If not it's up to the caller to protect this. |
| 2326 | */ |
| 2327 | atomic_inc(&cpu_buffer_a->record_disabled); |
| 2328 | atomic_inc(&cpu_buffer_b->record_disabled); |
| 2329 | |
| 2330 | buffer_a->buffers[cpu] = cpu_buffer_b; |
| 2331 | buffer_b->buffers[cpu] = cpu_buffer_a; |
| 2332 | |
| 2333 | cpu_buffer_b->buffer = buffer_a; |
| 2334 | cpu_buffer_a->buffer = buffer_b; |
| 2335 | |
| 2336 | atomic_dec(&cpu_buffer_a->record_disabled); |
| 2337 | atomic_dec(&cpu_buffer_b->record_disabled); |
| 2338 | |
| 2339 | return 0; |
| 2340 | } |
Robert Richter | c4f5018 | 2008-12-11 16:49:22 +0100 | [diff] [blame] | 2341 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 2342 | |
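/*
 * Illustrative sketch (not part of the original file): taking a
 * "snapshot" of one CPU's data by swapping it into a spare buffer, as
 * the comment above describes.  snap_buffer is assumed to be a second
 * ring buffer allocated with the same number of pages, and
 * read_snapshot() is a hypothetical consumer of the swapped-out data.
 *
 *	if (ring_buffer_swap_cpu(snap_buffer, buffer, cpu) == 0)
 *		read_snapshot(snap_buffer, cpu);
 */
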
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2343 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2344 | struct buffer_data_page *bpage, |
| 2345 | unsigned int offset) |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2346 | { |
| 2347 | struct ring_buffer_event *event; |
| 2348 | unsigned long head; |
| 2349 | |
| 2350 | __raw_spin_lock(&cpu_buffer->lock); |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2351 | for (head = offset; head < local_read(&bpage->commit); |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2352 | head += rb_event_length(event)) { |
| 2353 | |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2354 | event = __rb_data_page_index(bpage, head); |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2355 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) |
| 2356 | return; |
| 2357 | /* Only count data entries */ |
| 2358 | if (event->type != RINGBUF_TYPE_DATA) |
| 2359 | continue; |
| 2360 | cpu_buffer->entries--; |
| 2361 | } |
| 2362 | __raw_spin_unlock(&cpu_buffer->lock); |
| 2363 | } |
| 2364 | |
| 2365 | /** |
| 2366 | * ring_buffer_alloc_read_page - allocate a page to read from buffer |
| 2367 | * @buffer: the buffer to allocate for. |
| 2368 | * |
| 2369 | * This function is used in conjunction with ring_buffer_read_page. |
| 2370 | * When reading a full page from the ring buffer, these functions |
| 2371 | * can be used to speed up the process. The calling function should |
| 2372 | * allocate a few pages first with this function. Then when it |
| 2373 | * needs to get pages from the ring buffer, it passes the result |
| 2374 | * of this function into ring_buffer_read_page, which will swap |
| 2375 | * the page that was allocated with the reader page of the buffer. |
| 2376 | * |
| 2377 | * Returns: |
| 2378 | * The page allocated, or NULL on error. |
| 2379 | */ |
| 2380 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) |
| 2381 | { |
| 2382 | unsigned long addr; |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2383 | struct buffer_data_page *bpage; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2384 | |
| 2385 | addr = __get_free_page(GFP_KERNEL); |
| 2386 | if (!addr) |
| 2387 | return NULL; |
| 2388 | |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2389 | bpage = (void *)addr; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2390 | |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2391 | return bpage; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2392 | } |
| 2393 | |
| 2394 | /** |
| 2395 | * ring_buffer_free_read_page - free an allocated read page |
| 2396 | * @buffer: the buffer the page was allocated for |
| 2397 | * @data: the page to free |
| 2398 | * |
| 2399 | * Free a page allocated from ring_buffer_alloc_read_page. |
| 2400 | */ |
| 2401 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) |
| 2402 | { |
| 2403 | free_page((unsigned long)data); |
| 2404 | } |
| 2405 | |
| 2406 | /** |
| 2407 | * ring_buffer_read_page - extract a page from the ring buffer |
| 2408 | * @buffer: buffer to extract from |
| 2409 | * @data_page: the page to use allocated from ring_buffer_alloc_read_page |
| 2410 | * @cpu: the cpu of the buffer to extract |
| 2411 | * @full: should the extraction only happen when the page is full. |
| 2412 | * |
| 2413 | * This function will pull out a page from the ring buffer and consume it. |
| 2414 | * @data_page must be the address of the variable that was returned |
| 2415 | * from ring_buffer_alloc_read_page. This is because the page might be used |
| 2416 | * to swap with a page in the ring buffer. |
| 2417 | * |
| 2418 | * for example: |
Lai Jiangshan | b85fa01 | 2009-02-09 14:21:14 +0800 | [diff] [blame] | 2419 | * rpage = ring_buffer_alloc_read_page(buffer); |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2420 | * if (!rpage) |
| 2421 | * return error; |
| 2422 | * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2423 | * if (ret >= 0) |
| 2424 | * process_page(rpage, ret); |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2425 | * |
| 2426 | * When @full is set, the function will not return the data unless |
| 2427 | * the writer is off the reader page. |
| 2428 | * |
| 2429 | * Note: it is up to the calling functions to handle sleeps and wakeups. |
| 2430 | * The ring buffer can be used anywhere in the kernel and can not |
| 2431 | * blindly call wake_up. The layer that uses the ring buffer must be |
| 2432 | * responsible for that. |
| 2433 | * |
| 2434 | * Returns: |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2435 | * >=0 if data has been transferred, returns the offset of consumed data. |
| 2436 | * <0 if no data has been transferred. |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2437 | */ |
| 2438 | int ring_buffer_read_page(struct ring_buffer *buffer, |
| 2439 | void **data_page, int cpu, int full) |
| 2440 | { |
| 2441 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
| 2442 | struct ring_buffer_event *event; |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2443 | struct buffer_data_page *bpage; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2444 | unsigned long flags; |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2445 | unsigned int read; |
| 2446 | int ret = -1; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2447 | |
| 2448 | if (!data_page) |
| 2449 | return 0; |
| 2450 | |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2451 | bpage = *data_page; |
| 2452 | if (!bpage) |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2453 | return 0; |
| 2454 | |
| 2455 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 2456 | |
| 2457 | /* |
| 2458 | * rb_buffer_peek will get the next reader page if |
| 2459 | * the current reader page is empty. |
| 2460 | */ |
| 2461 | event = rb_buffer_peek(buffer, cpu, NULL); |
| 2462 | if (!event) |
| 2463 | goto out; |
| 2464 | |
| 2465 | /* check for data */ |
| 2466 | if (!local_read(&cpu_buffer->reader_page->page->commit)) |
| 2467 | goto out; |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2468 | |
| 2469 | read = cpu_buffer->reader_page->read; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2470 | /* |
| 2471 | * If the writer is already off of the read page, then simply |
| 2472 | * switch the read page with the given page. Otherwise |
| 2473 | * we need to copy the data from the reader page into the given page. |
| 2474 | */ |
| 2475 | if (cpu_buffer->reader_page == cpu_buffer->commit_page) { |
Lai Jiangshan | b85fa01 | 2009-02-09 14:21:14 +0800 | [diff] [blame] | 2476 | unsigned int commit = rb_page_commit(cpu_buffer->reader_page); |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2477 | struct buffer_data_page *rpage = cpu_buffer->reader_page->page; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2478 | |
| 2479 | if (full) |
| 2480 | goto out; |
| 2481 | /* The writer is still on the reader page, we must copy */ |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2482 | memcpy(bpage->data + read, rpage->data + read, commit - read); |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2483 | |
| 2484 | /* consume what was read */ |
Lai Jiangshan | b85fa01 | 2009-02-09 14:21:14 +0800 | [diff] [blame] | 2485 | cpu_buffer->reader_page->read = commit; |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2486 | |
| 2487 | /* update bpage */ |
| 2488 | local_set(&bpage->commit, commit); |
| 2489 | if (!read) |
| 2490 | bpage->time_stamp = rpage->time_stamp; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2491 | } else { |
| 2492 | /* swap the pages */ |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2493 | rb_init_page(bpage); |
| 2494 | bpage = cpu_buffer->reader_page->page; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2495 | cpu_buffer->reader_page->page = *data_page; |
| 2496 | cpu_buffer->reader_page->read = 0; |
Steven Rostedt | 044fa78 | 2008-12-02 23:50:03 -0500 | [diff] [blame] | 2497 | *data_page = bpage; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2498 | } |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2499 | ret = read; |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2500 | |
| 2501 | /* update the entry counter */ |
Lai Jiangshan | 667d241 | 2009-02-09 14:21:17 +0800 | [diff] [blame] | 2502 | rb_remove_entries(cpu_buffer, bpage, read); |
Steven Rostedt | 8789a9e | 2008-12-02 15:34:07 -0500 | [diff] [blame] | 2503 | out: |
| 2504 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 2505 | |
| 2506 | return ret; |
| 2507 | } |
| 2508 | |
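/*
 * Illustrative sketch (not part of the original file): draining one CPU
 * buffer a page at a time with the read page helpers above.
 * process_page() is a hypothetical consumer of a full buffer_data_page.
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer);
 *	int ret;
 *
 *	if (!rpage)
 *		return;
 *	while ((ret = ring_buffer_read_page(buffer, &rpage, cpu, 0)) >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, rpage);
 */
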
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2509 | static ssize_t |
| 2510 | rb_simple_read(struct file *filp, char __user *ubuf, |
| 2511 | size_t cnt, loff_t *ppos) |
| 2512 | { |
Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 2513 | unsigned long *p = filp->private_data; |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2514 | char buf[64]; |
| 2515 | int r; |
| 2516 | |
Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 2517 | if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) |
| 2518 | r = sprintf(buf, "permanently disabled\n"); |
| 2519 | else |
| 2520 | r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2521 | |
| 2522 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
| 2523 | } |
| 2524 | |
| 2525 | static ssize_t |
| 2526 | rb_simple_write(struct file *filp, const char __user *ubuf, |
| 2527 | size_t cnt, loff_t *ppos) |
| 2528 | { |
Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 2529 | unsigned long *p = filp->private_data; |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2530 | char buf[64]; |
Hannes Eder | 5e39841 | 2009-02-10 19:44:34 +0100 | [diff] [blame] | 2531 | unsigned long val; |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2532 | int ret; |
| 2533 | |
| 2534 | if (cnt >= sizeof(buf)) |
| 2535 | return -EINVAL; |
| 2536 | |
| 2537 | if (copy_from_user(&buf, ubuf, cnt)) |
| 2538 | return -EFAULT; |
| 2539 | |
| 2540 | buf[cnt] = 0; |
| 2541 | |
| 2542 | ret = strict_strtoul(buf, 10, &val); |
| 2543 | if (ret < 0) |
| 2544 | return ret; |
| 2545 | |
Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 2546 | if (val) |
| 2547 | set_bit(RB_BUFFERS_ON_BIT, p); |
| 2548 | else |
| 2549 | clear_bit(RB_BUFFERS_ON_BIT, p); |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2550 | |
| 2551 | (*ppos)++; |
| 2552 | |
| 2553 | return cnt; |
| 2554 | } |
| 2555 | |
| 2556 | static struct file_operations rb_simple_fops = { |
| 2557 | .open = tracing_open_generic, |
| 2558 | .read = rb_simple_read, |
| 2559 | .write = rb_simple_write, |
| 2560 | }; |
| 2561 | |
| 2562 | |
| 2563 | static __init int rb_init_debugfs(void) |
| 2564 | { |
| 2565 | struct dentry *d_tracer; |
| 2566 | struct dentry *entry; |
| 2567 | |
| 2568 | d_tracer = tracing_init_dentry(); |
| 2569 | |
| 2570 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, |
Steven Rostedt | 033601a | 2008-11-21 12:41:55 -0500 | [diff] [blame] | 2571 | &ring_buffer_flags, &rb_simple_fops); |
Steven Rostedt | a358324 | 2008-11-11 15:01:42 -0500 | [diff] [blame] | 2572 | if (!entry) |
| 2573 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); |
| 2574 | |
| 2575 | return 0; |
| 2576 | } |
| 2577 | |
| 2578 | fs_initcall(rb_init_debugfs); |
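
/*
 * The initcall above creates a "tracing_on" control file in the tracing
 * debugfs directory.  An illustrative user-space sketch for toggling it
 * (the mount point /sys/kernel/debug is an assumption; debugfs may be
 * mounted elsewhere, and the snippet assumes <fcntl.h> and <unistd.h>):
 *
 *	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "0", 1);	(write "1" to re-enable recording)
 *		close(fd);
 *	}
 */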