Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
Ingo Molnar14131f22009-02-26 18:47:11 +01007#include <linux/trace_clock.h>
Steven Rostedt78d904b2009-02-05 18:43:07 -05008#include <linux/ftrace_irq.h>
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04009#include <linux/spinlock.h>
10#include <linux/debugfs.h>
11#include <linux/uaccess.h>
Steven Rostedta81bd802009-02-06 01:45:16 -050012#include <linux/hardirq.h>
Vegard Nossum1744a212009-02-28 08:29:44 +010013#include <linux/kmemcheck.h>
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040014#include <linux/module.h>
15#include <linux/percpu.h>
16#include <linux/mutex.h>
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040017#include <linux/init.h>
18#include <linux/hash.h>
19#include <linux/list.h>
Steven Rostedt554f7862009-03-11 22:00:13 -040020#include <linux/cpu.h>
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040021#include <linux/fs.h>
22
Steven Rostedt182e9f52008-11-03 23:15:56 -050023#include "trace.h"
24
Steven Rostedt033601a2008-11-21 12:41:55 -050025/*
Steven Rostedtd1b182a2009-04-15 16:53:47 -040026 * The ring buffer header is special. We must manually keep it up to date.
27 */
28int ring_buffer_print_entry_header(struct trace_seq *s)
29{
30 int ret;
31
Lai Jiangshan334d4162009-04-24 11:27:05 +080032 ret = trace_seq_printf(s, "# compressed entry header\n");
33 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
Steven Rostedtd1b182a2009-04-15 16:53:47 -040034 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
35 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
36 ret = trace_seq_printf(s, "\n");
37 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
38 RINGBUF_TYPE_PADDING);
39 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
40 RINGBUF_TYPE_TIME_EXTEND);
Lai Jiangshan334d4162009-04-24 11:27:05 +080041 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
42 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
Steven Rostedtd1b182a2009-04-15 16:53:47 -040043
44 return ret;
45}
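/*
 * Illustrative sketch (not part of this file): a debugfs "show" handler
 * could dump the header description roughly like this, assuming a
 * struct trace_seq that has been prepared with trace_seq_init() elsewhere:
 *
 *	struct trace_seq seq;
 *
 *	trace_seq_init(&seq);
 *	ring_buffer_print_entry_header(&seq);
 *	// seq now holds the human readable entry header layout
 */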
46
47/*
Steven Rostedt5cc98542009-03-12 22:24:17 -040048 * The ring buffer is made up of a list of pages. A separate list of pages is
49 * allocated for each CPU. A writer may only write to a buffer that is
50 * associated with the CPU it is currently executing on. A reader may read
51 * from any per cpu buffer.
52 *
53 * The reader is special. For each per cpu buffer, the reader has its own
54 * reader page. When a reader has read the entire reader page, this reader
55 * page is swapped with another page in the ring buffer.
56 *
57 * Now, as long as the writer is off the reader page, the reader can do what
58 * ever it wants with that page. The writer will never write to that page
59 * again (as long as it is out of the ring buffer).
60 *
61 * Here's some silly ASCII art.
62 *
63 * +------+
64 * |reader| RING BUFFER
65 * |page |
66 * +------+ +---+ +---+ +---+
67 * | |-->| |-->| |
68 * +---+ +---+ +---+
69 * ^ |
70 * | |
71 * +---------------+
72 *
73 *
74 * +------+
75 * |reader| RING BUFFER
76 * |page |------------------v
77 * +------+ +---+ +---+ +---+
78 * | |-->| |-->| |
79 * +---+ +---+ +---+
80 * ^ |
81 * | |
82 * +---------------+
83 *
84 *
85 * +------+
86 * |reader| RING BUFFER
87 * |page |------------------v
88 * +------+ +---+ +---+ +---+
89 * ^ | |-->| |-->| |
90 * | +---+ +---+ +---+
91 * | |
92 * | |
93 * +------------------------------+
94 *
95 *
96 * +------+
97 * |buffer| RING BUFFER
98 * |page |------------------v
99 * +------+ +---+ +---+ +---+
100 * ^ | | | |-->| |
101 * | New +---+ +---+ +---+
102 * | Reader------^ |
103 * | page |
104 * +------------------------------+
105 *
106 *
107 * After we make this swap, the reader can hand this page off to the splice
108 * code and be done with it. It can even allocate a new page if it needs to
109 * and swap that into the ring buffer.
110 *
111 * We will be using cmpxchg soon to make all this lockless.
112 *
113 */
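/*
 * Illustrative sketch (not part of this file): the page swap described
 * above is what makes a simple consuming read loop safe to run
 * concurrently with a writer. The ring_buffer_consume() call shown uses
 * the three-argument form as of this version:
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
 *		void *data = ring_buffer_event_data(event);
 *		unsigned len = ring_buffer_event_length(event);
 *		// handle data/len; the writer never touches the
 *		// reader page while we are on it
 *	}
 */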
114
115/*
Steven Rostedt033601a2008-11-21 12:41:55 -0500116 * A fast way to enable or disable all ring buffers is to
117 * call tracing_on or tracing_off. Turning off the ring buffers
118 * prevents all ring buffers from being recorded to.
 119 * Turning this switch on makes it OK to write to the
120 * ring buffer, if the ring buffer is enabled itself.
121 *
122 * There's three layers that must be on in order to write
123 * to the ring buffer.
124 *
125 * 1) This global flag must be set.
126 * 2) The ring buffer must be enabled for recording.
127 * 3) The per cpu buffer must be enabled for recording.
128 *
129 * In case of an anomaly, this global flag has a bit set that
 130 * will permanently disable all ring buffers.
131 */
132
133/*
134 * Global flag to disable all recording to ring buffers
135 * This has two bits: ON, DISABLED
136 *
137 * ON DISABLED
138 * ---- ----------
139 * 0 0 : ring buffers are off
140 * 1 0 : ring buffers are on
141 * X 1 : ring buffers are permanently disabled
142 */
143
144enum {
145 RB_BUFFERS_ON_BIT = 0,
146 RB_BUFFERS_DISABLED_BIT = 1,
147};
148
149enum {
150 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
151 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
152};
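/*
 * Illustrative sketch (not part of this file): the three layers above
 * roughly translate into checks like these on the write path (the real
 * checks are spread across ring_buffer_lock_reserve() and friends):
 *
 *	if (ring_buffer_flags != RB_BUFFERS_ON)
 *		return NULL;			// layer 1: global switch
 *	if (atomic_read(&buffer->record_disabled))
 *		return NULL;			// layer 2: this ring buffer
 *	if (atomic_read(&cpu_buffer->record_disabled))
 *		return NULL;			// layer 3: this per cpu buffer
 */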
153
Hannes Eder5e398412009-02-10 19:44:34 +0100154static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
Steven Rostedta3583242008-11-11 15:01:42 -0500155
Steven Rostedt474d32b2009-03-03 19:51:40 -0500156#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
157
Steven Rostedta3583242008-11-11 15:01:42 -0500158/**
159 * tracing_on - enable all tracing buffers
160 *
161 * This function enables all tracing buffers that may have been
162 * disabled with tracing_off.
163 */
164void tracing_on(void)
165{
Steven Rostedt033601a2008-11-21 12:41:55 -0500166 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
Steven Rostedta3583242008-11-11 15:01:42 -0500167}
Robert Richterc4f50182008-12-11 16:49:22 +0100168EXPORT_SYMBOL_GPL(tracing_on);
Steven Rostedta3583242008-11-11 15:01:42 -0500169
170/**
171 * tracing_off - turn off all tracing buffers
172 *
173 * This function stops all tracing buffers from recording data.
174 * It does not disable any overhead the tracers themselves may
175 * be causing. This function simply causes all recording to
176 * the ring buffers to fail.
177 */
178void tracing_off(void)
179{
Steven Rostedt033601a2008-11-21 12:41:55 -0500180 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181}
Robert Richterc4f50182008-12-11 16:49:22 +0100182EXPORT_SYMBOL_GPL(tracing_off);
Steven Rostedt033601a2008-11-21 12:41:55 -0500183
184/**
185 * tracing_off_permanent - permanently disable ring buffers
186 *
187 * This function, once called, will disable all ring buffers
Wenji Huangc3706f02009-02-10 01:03:18 -0500188 * permanently.
Steven Rostedt033601a2008-11-21 12:41:55 -0500189 */
190void tracing_off_permanent(void)
191{
192 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
Steven Rostedta3583242008-11-11 15:01:42 -0500193}
194
Steven Rostedt988ae9d2009-02-14 19:17:02 -0500195/**
196 * tracing_is_on - show state of ring buffers enabled
197 */
198int tracing_is_on(void)
199{
200 return ring_buffer_flags == RB_BUFFERS_ON;
201}
202EXPORT_SYMBOL_GPL(tracing_is_on);
203
Steven Rostedte3d6bf02009-03-03 13:53:07 -0500204#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
Andrew Morton67d34722009-01-09 12:27:09 -0800205#define RB_ALIGNMENT 4U
Lai Jiangshan334d4162009-04-24 11:27:05 +0800206#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
Steven Rostedtc7b09302009-06-11 11:12:00 -0400207#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
Lai Jiangshan334d4162009-04-24 11:27:05 +0800208
209/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400211
212enum {
213 RB_LEN_TIME_EXTEND = 8,
214 RB_LEN_TIME_STAMP = 16,
215};
216
Tom Zanussi2d622712009-03-22 03:30:49 -0500217static inline int rb_null_event(struct ring_buffer_event *event)
218{
Steven Rostedta1863c22009-09-03 10:23:58 -0400219 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
Tom Zanussi2d622712009-03-22 03:30:49 -0500220}
221
222static void rb_event_set_padding(struct ring_buffer_event *event)
223{
Steven Rostedta1863c22009-09-03 10:23:58 -0400224 /* padding has a NULL time_delta */
Lai Jiangshan334d4162009-04-24 11:27:05 +0800225 event->type_len = RINGBUF_TYPE_PADDING;
Tom Zanussi2d622712009-03-22 03:30:49 -0500226 event->time_delta = 0;
227}
228
Tom Zanussi2d622712009-03-22 03:30:49 -0500229static unsigned
230rb_event_data_length(struct ring_buffer_event *event)
231{
232 unsigned length;
233
Lai Jiangshan334d4162009-04-24 11:27:05 +0800234 if (event->type_len)
235 length = event->type_len * RB_ALIGNMENT;
Tom Zanussi2d622712009-03-22 03:30:49 -0500236 else
237 length = event->array[0];
238 return length + RB_EVNT_HDR_SIZE;
239}
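/*
 * Worked example (illustrative): with RB_ALIGNMENT == 4, an event whose
 * type_len is 3 carries 3 * 4 = 12 bytes of payload directly after the
 * 4 byte header, so rb_event_data_length() returns 12 + RB_EVNT_HDR_SIZE.
 * An event with type_len == 0 stores its payload length in array[0]
 * instead, and the payload starts at array[1].
 */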
240
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400241/* inline for ring buffer fast paths */
Andrew Morton34a148b2009-01-09 12:27:09 -0800242static unsigned
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400243rb_event_length(struct ring_buffer_event *event)
244{
Lai Jiangshan334d4162009-04-24 11:27:05 +0800245 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400246 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -0500247 if (rb_null_event(event))
248 /* undefined */
249 return -1;
Lai Jiangshan334d4162009-04-24 11:27:05 +0800250 return event->array[0] + RB_EVNT_HDR_SIZE;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400251
252 case RINGBUF_TYPE_TIME_EXTEND:
253 return RB_LEN_TIME_EXTEND;
254
255 case RINGBUF_TYPE_TIME_STAMP:
256 return RB_LEN_TIME_STAMP;
257
258 case RINGBUF_TYPE_DATA:
Tom Zanussi2d622712009-03-22 03:30:49 -0500259 return rb_event_data_length(event);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400260 default:
261 BUG();
262 }
263 /* not hit */
264 return 0;
265}
266
267/**
268 * ring_buffer_event_length - return the length of the event
269 * @event: the event to get the length of
270 */
271unsigned ring_buffer_event_length(struct ring_buffer_event *event)
272{
Robert Richter465634a2009-01-07 15:32:11 +0100273 unsigned length = rb_event_length(event);
Lai Jiangshan334d4162009-04-24 11:27:05 +0800274 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
Robert Richter465634a2009-01-07 15:32:11 +0100275 return length;
276 length -= RB_EVNT_HDR_SIZE;
277 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
278 length -= sizeof(event->array[0]);
279 return length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400280}
Robert Richterc4f50182008-12-11 16:49:22 +0100281EXPORT_SYMBOL_GPL(ring_buffer_event_length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400282
283/* inline for ring buffer fast paths */
Andrew Morton34a148b2009-01-09 12:27:09 -0800284static void *
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400285rb_event_data(struct ring_buffer_event *event)
286{
Lai Jiangshan334d4162009-04-24 11:27:05 +0800287 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400288 /* If length is in len field, then array[0] has the data */
Lai Jiangshan334d4162009-04-24 11:27:05 +0800289 if (event->type_len)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400290 return (void *)&event->array[0];
291 /* Otherwise length is in array[0] and array[1] has the data */
292 return (void *)&event->array[1];
293}
294
295/**
296 * ring_buffer_event_data - return the data of the event
297 * @event: the event to get the data from
298 */
299void *ring_buffer_event_data(struct ring_buffer_event *event)
300{
301 return rb_event_data(event);
302}
Robert Richterc4f50182008-12-11 16:49:22 +0100303EXPORT_SYMBOL_GPL(ring_buffer_event_data);
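/*
 * Illustrative sketch (not part of this file): the usual write side
 * pairing of these helpers, using the reserve/commit API from this same
 * file (signatures as of this version); my_entry is a hypothetical
 * payload type:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */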
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400304
305#define for_each_buffer_cpu(buffer, cpu) \
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030306 for_each_cpu(cpu, buffer->cpumask)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400307
308#define TS_SHIFT 27
309#define TS_MASK ((1ULL << TS_SHIFT) - 1)
310#define TS_DELTA_TEST (~TS_MASK)
311
Steven Rostedtabc9b562008-12-02 15:34:06 -0500312struct buffer_data_page {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400313 u64 time_stamp; /* page time stamp */
Wenji Huangc3706f02009-02-10 01:03:18 -0500314 local_t commit; /* write committed index */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500315 unsigned char data[]; /* data of buffer page */
316};
317
Steven Rostedt77ae3652009-03-27 11:00:29 -0400318/*
319 * Note, the buffer_page list must be first. The buffer pages
320 * are allocated in cache lines, which means that each buffer
321 * page will be at the beginning of a cache line, and thus
322 * the least significant bits will be zero. We use this to
323 * add flags in the list struct pointers, to make the ring buffer
324 * lockless.
325 */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500326struct buffer_page {
Steven Rostedt778c55d2009-05-01 18:44:45 -0400327 struct list_head list; /* list of buffer pages */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500328 local_t write; /* index for next write */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400329 unsigned read; /* index for next read */
Steven Rostedt778c55d2009-05-01 18:44:45 -0400330 local_t entries; /* entries on this page */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500331 struct buffer_data_page *page; /* Actual data page */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400332};
333
Steven Rostedt77ae3652009-03-27 11:00:29 -0400334/*
335 * The buffer page counters, write and entries, must be reset
336 * atomically when crossing page boundaries. To synchronize this
 337 * update, two counters are packed into one word. One is
338 * the actual counter for the write position or count on the page.
339 *
340 * The other is a counter of updaters. Before an update happens
 341 * the updater portion of the counter is incremented. This will
342 * allow the updater to update the counter atomically.
343 *
344 * The counter is 20 bits, and the state data is 12.
345 */
346#define RB_WRITE_MASK 0xfffff
347#define RB_WRITE_INTCNT (1 << 20)
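/*
 * Worked example (illustrative): local_add_return(RB_WRITE_INTCNT, &write)
 * bumps the updater count held in the upper bits while leaving the low
 * 20 bits (the real write index, masked by RB_WRITE_MASK) untouched.
 * If write was 0x00300010 (3 nested updaters, index 0x10), the result is
 * 0x00400010, and "result & RB_WRITE_MASK" still yields the index 0x10.
 */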
348
Steven Rostedt044fa782008-12-02 23:50:03 -0500349static void rb_init_page(struct buffer_data_page *bpage)
Steven Rostedtabc9b562008-12-02 15:34:06 -0500350{
Steven Rostedt044fa782008-12-02 23:50:03 -0500351 local_set(&bpage->commit, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -0500352}
353
Steven Rostedt474d32b2009-03-03 19:51:40 -0500354/**
355 * ring_buffer_page_len - the size of data on the page.
356 * @page: The page to read
357 *
358 * Returns the amount of data on the page, including buffer page header.
359 */
Steven Rostedtef7a4a12009-03-03 00:27:49 -0500360size_t ring_buffer_page_len(void *page)
361{
Steven Rostedt474d32b2009-03-03 19:51:40 -0500362 return local_read(&((struct buffer_data_page *)page)->commit)
363 + BUF_PAGE_HDR_SIZE;
Steven Rostedtef7a4a12009-03-03 00:27:49 -0500364}
365
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400366/*
Steven Rostedted568292008-09-29 23:02:40 -0400367 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
368 * this issue out.
369 */
Andrew Morton34a148b2009-01-09 12:27:09 -0800370static void free_buffer_page(struct buffer_page *bpage)
Steven Rostedted568292008-09-29 23:02:40 -0400371{
Andrew Morton34a148b2009-01-09 12:27:09 -0800372 free_page((unsigned long)bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400373 kfree(bpage);
Steven Rostedted568292008-09-29 23:02:40 -0400374}
375
376/*
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400377 * We need to fit the time_stamp delta into 27 bits.
378 */
379static inline int test_time_stamp(u64 delta)
380{
381 if (delta & TS_DELTA_TEST)
382 return 1;
383 return 0;
384}
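/*
 * Worked example (illustrative): with TS_SHIFT == 27, any delta of
 * 2^27 ns (roughly 134 ms) or more has bits set above TS_MASK, so
 * test_time_stamp() returns 1 and the writer must emit a separate
 * RINGBUF_TYPE_TIME_EXTEND event instead of squeezing the delta into
 * the 27 bit time_delta field of the event header.
 */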
385
Steven Rostedt474d32b2009-03-03 19:51:40 -0500386#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400387
Steven Rostedtbe957c42009-05-11 14:42:53 -0400388/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
389#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
390
Steven Rostedtea05b572009-06-03 09:30:10 -0400391/* Max number of timestamps that can fit on a page */
392#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
393
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400394int ring_buffer_print_page_header(struct trace_seq *s)
395{
396 struct buffer_data_page field;
397 int ret;
398
399 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
Tom Zanussi26a50742009-10-06 01:09:50 -0500400 "offset:0;\tsize:%u;\tsigned:%u;\n",
401 (unsigned int)sizeof(field.time_stamp),
402 (unsigned int)is_signed_type(u64));
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400403
404 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
Tom Zanussi26a50742009-10-06 01:09:50 -0500405 "offset:%u;\tsize:%u;\tsigned:%u;\n",
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400406 (unsigned int)offsetof(typeof(field), commit),
Tom Zanussi26a50742009-10-06 01:09:50 -0500407 (unsigned int)sizeof(field.commit),
408 (unsigned int)is_signed_type(long));
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400409
410 ret = trace_seq_printf(s, "\tfield: char data;\t"
Tom Zanussi26a50742009-10-06 01:09:50 -0500411 "offset:%u;\tsize:%u;\tsigned:%u;\n",
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400412 (unsigned int)offsetof(typeof(field), data),
Tom Zanussi26a50742009-10-06 01:09:50 -0500413 (unsigned int)BUF_PAGE_SIZE,
414 (unsigned int)is_signed_type(char));
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400415
416 return ret;
417}
418
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400419/*
420 * head_page == tail_page && head == tail then buffer is empty.
421 */
422struct ring_buffer_per_cpu {
423 int cpu;
424 struct ring_buffer *buffer;
Steven Rostedt77ae3652009-03-27 11:00:29 -0400425 spinlock_t reader_lock; /* serialize readers */
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500426 raw_spinlock_t lock;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400427 struct lock_class_key lock_key;
Steven Rostedt3adc54f2009-03-30 15:32:01 -0400428 struct list_head *pages;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400429 struct buffer_page *head_page; /* read from head */
430 struct buffer_page *tail_page; /* write to tail */
Wenji Huangc3706f02009-02-10 01:03:18 -0500431 struct buffer_page *commit_page; /* committed pages */
Steven Rostedtd7690412008-10-01 00:29:53 -0400432 struct buffer_page *reader_page;
Steven Rostedt77ae3652009-03-27 11:00:29 -0400433 local_t commit_overrun;
434 local_t overrun;
Steven Rostedte4906ef2009-04-30 20:49:44 -0400435 local_t entries;
Steven Rostedtfa743952009-06-16 12:37:57 -0400436 local_t committing;
437 local_t commits;
Steven Rostedt77ae3652009-03-27 11:00:29 -0400438 unsigned long read;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400439 u64 write_stamp;
440 u64 read_stamp;
441 atomic_t record_disabled;
442};
443
444struct ring_buffer {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400445 unsigned pages;
446 unsigned flags;
447 int cpus;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400448 atomic_t record_disabled;
Arnaldo Carvalho de Melo00f62f62009-02-09 17:04:06 -0200449 cpumask_var_t cpumask;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400450
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +0200451 struct lock_class_key *reader_lock_key;
452
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400453 struct mutex mutex;
454
455 struct ring_buffer_per_cpu **buffers;
Steven Rostedt554f7862009-03-11 22:00:13 -0400456
Steven Rostedt59222ef2009-03-12 11:46:03 -0400457#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -0400458 struct notifier_block cpu_notify;
459#endif
Steven Rostedt37886f62009-03-17 17:22:06 -0400460 u64 (*clock)(void);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400461};
462
463struct ring_buffer_iter {
464 struct ring_buffer_per_cpu *cpu_buffer;
465 unsigned long head;
466 struct buffer_page *head_page;
467 u64 read_stamp;
468};
469
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500470/* buffer may be either ring_buffer or ring_buffer_per_cpu */
Steven Rostedt077c5402009-09-03 19:53:46 -0400471#define RB_WARN_ON(b, cond) \
472 ({ \
473 int _____ret = unlikely(cond); \
474 if (_____ret) { \
475 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
476 struct ring_buffer_per_cpu *__b = \
477 (void *)b; \
478 atomic_inc(&__b->buffer->record_disabled); \
479 } else \
480 atomic_inc(&b->record_disabled); \
481 WARN_ON(1); \
482 } \
483 _____ret; \
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500484 })
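/*
 * Illustrative usage (not part of this file): RB_WARN_ON() both warns
 * and stops further recording, so callers typically bail out on it,
 * as rb_set_head_page() below does:
 *
 *	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 *		return NULL;
 */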
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500485
Steven Rostedt37886f62009-03-17 17:22:06 -0400486/* Up this if you want to test the TIME_EXTENTS and normalization */
487#define DEBUG_SHIFT 0
488
Steven Rostedt88eb0122009-05-11 16:28:23 -0400489static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
490{
491 /* shift to debug/test normalization and TIME_EXTENTS */
492 return buffer->clock() << DEBUG_SHIFT;
493}
494
Steven Rostedt37886f62009-03-17 17:22:06 -0400495u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
496{
497 u64 time;
498
499 preempt_disable_notrace();
Steven Rostedt88eb0122009-05-11 16:28:23 -0400500 time = rb_time_stamp(buffer, cpu);
Steven Rostedt37886f62009-03-17 17:22:06 -0400501 preempt_enable_no_resched_notrace();
502
503 return time;
504}
505EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
506
507void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
508 int cpu, u64 *ts)
509{
510 /* Just stupid testing the normalize function and deltas */
511 *ts >>= DEBUG_SHIFT;
512}
513EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
514
Steven Rostedt77ae3652009-03-27 11:00:29 -0400515/*
516 * Making the ring buffer lockless makes things tricky.
 517 * Writes only happen on the CPU that they are on, so they
 518 * only need to worry about interrupts. Reads can
519 * happen on any CPU.
520 *
521 * The reader page is always off the ring buffer, but when the
522 * reader finishes with a page, it needs to swap its page with
523 * a new one from the buffer. The reader needs to take from
524 * the head (writes go to the tail). But if a writer is in overwrite
525 * mode and wraps, it must push the head page forward.
526 *
527 * Here lies the problem.
528 *
529 * The reader must be careful to replace only the head page, and
530 * not another one. As described at the top of the file in the
531 * ASCII art, the reader sets its old page to point to the next
532 * page after head. It then sets the page after head to point to
533 * the old reader page. But if the writer moves the head page
534 * during this operation, the reader could end up with the tail.
535 *
536 * We use cmpxchg to help prevent this race. We also do something
537 * special with the page before head. We set the LSB to 1.
538 *
539 * When the writer must push the page forward, it will clear the
540 * bit that points to the head page, move the head, and then set
541 * the bit that points to the new head page.
542 *
543 * We also don't want an interrupt coming in and moving the head
544 * page on another writer. Thus we use the second LSB to catch
545 * that too. Thus:
546 *
547 * head->list->prev->next bit 1 bit 0
548 * ------- -------
549 * Normal page 0 0
550 * Points to head page 0 1
551 * New head page 1 0
552 *
553 * Note we can not trust the prev pointer of the head page, because:
554 *
555 * +----+ +-----+ +-----+
556 * | |------>| T |---X--->| N |
557 * | |<------| | | |
558 * +----+ +-----+ +-----+
559 * ^ ^ |
560 * | +-----+ | |
561 * +----------| R |----------+ |
562 * | |<-----------+
563 * +-----+
564 *
565 * Key: ---X--> HEAD flag set in pointer
566 * T Tail page
567 * R Reader page
568 * N Next page
569 *
570 * (see __rb_reserve_next() to see where this happens)
571 *
572 * What the above shows is that the reader just swapped out
573 * the reader page with a page in the buffer, but before it
574 * could make the new header point back to the new page added
575 * it was preempted by a writer. The writer moved forward onto
576 * the new page added by the reader and is about to move forward
577 * again.
578 *
579 * You can see, it is legitimate for the previous pointer of
580 * the head (or any page) not to point back to itself. But only
 581 * temporarily.
582 */
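/*
 * Illustrative sketch (not part of this file): the HEAD/UPDATE flags live
 * in the low bits of the ->next pointer of the page *before* the head.
 * A writer claims the head page for pushing roughly like this (see
 * rb_head_page_set() below for the real thing):
 *
 *	unsigned long val = (unsigned long)&head->list & ~RB_FLAG_MASK;
 *	unsigned long old = cmpxchg((unsigned long *)&prev->list.next,
 *				    val | RB_PAGE_HEAD, val | RB_PAGE_UPDATE);
 *	if (old != (val | RB_PAGE_HEAD))
 *		;	// lost the race: the reader moved the head page
 */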
583
584#define RB_PAGE_NORMAL 0UL
585#define RB_PAGE_HEAD 1UL
586#define RB_PAGE_UPDATE 2UL
587
588
589#define RB_FLAG_MASK 3UL
590
591/* PAGE_MOVED is not part of the mask */
592#define RB_PAGE_MOVED 4UL
593
594/*
595 * rb_list_head - remove any bit
596 */
597static struct list_head *rb_list_head(struct list_head *list)
598{
599 unsigned long val = (unsigned long)list;
600
601 return (struct list_head *)(val & ~RB_FLAG_MASK);
602}
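/*
 * Worked example (illustrative): buffer pages are cache line aligned,
 * so a ->next value of, say, 0xffff880012345681 really points at
 * 0xffff880012345680 with RB_PAGE_HEAD (bit 0) set; rb_list_head()
 * strips the low RB_FLAG_MASK bits to recover the usable pointer.
 */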
603
604/*
 605 * rb_is_head_page - test if the given page is the head page
606 *
607 * Because the reader may move the head_page pointer, we can
608 * not trust what the head page is (it may be pointing to
609 * the reader page). But if the next page is a header page,
 610 * its flags will be non-zero.
611 */
 612static inline int
613rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
614 struct buffer_page *page, struct list_head *list)
615{
616 unsigned long val;
617
618 val = (unsigned long)list->next;
619
620 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
621 return RB_PAGE_MOVED;
622
623 return val & RB_FLAG_MASK;
624}
625
626/*
627 * rb_is_reader_page
628 *
 629 * The unique thing about the reader page is that, if the
630 * writer is ever on it, the previous pointer never points
631 * back to the reader page.
632 */
633static int rb_is_reader_page(struct buffer_page *page)
634{
635 struct list_head *list = page->list.prev;
636
637 return rb_list_head(list->next) != &page->list;
638}
639
640/*
641 * rb_set_list_to_head - set a list_head to be pointing to head.
642 */
643static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
644 struct list_head *list)
645{
646 unsigned long *ptr;
647
648 ptr = (unsigned long *)&list->next;
649 *ptr |= RB_PAGE_HEAD;
650 *ptr &= ~RB_PAGE_UPDATE;
651}
652
653/*
654 * rb_head_page_activate - sets up head page
655 */
656static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
657{
658 struct buffer_page *head;
659
660 head = cpu_buffer->head_page;
661 if (!head)
662 return;
663
664 /*
665 * Set the previous list pointer to have the HEAD flag.
666 */
667 rb_set_list_to_head(cpu_buffer, head->list.prev);
668}
669
670static void rb_list_head_clear(struct list_head *list)
671{
672 unsigned long *ptr = (unsigned long *)&list->next;
673
674 *ptr &= ~RB_FLAG_MASK;
675}
676
677/*
 678 * rb_head_page_deactivate - clears head page ptr (for free list)
679 */
680static void
681rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
682{
683 struct list_head *hd;
684
685 /* Go through the whole list and clear any pointers found. */
686 rb_list_head_clear(cpu_buffer->pages);
687
688 list_for_each(hd, cpu_buffer->pages)
689 rb_list_head_clear(hd);
690}
691
692static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
693 struct buffer_page *head,
694 struct buffer_page *prev,
695 int old_flag, int new_flag)
696{
697 struct list_head *list;
698 unsigned long val = (unsigned long)&head->list;
699 unsigned long ret;
700
701 list = &prev->list;
702
703 val &= ~RB_FLAG_MASK;
704
Steven Rostedt08a40812009-09-14 09:31:35 -0400705 ret = cmpxchg((unsigned long *)&list->next,
706 val | old_flag, val | new_flag);
Steven Rostedt77ae3652009-03-27 11:00:29 -0400707
708 /* check if the reader took the page */
709 if ((ret & ~RB_FLAG_MASK) != val)
710 return RB_PAGE_MOVED;
711
712 return ret & RB_FLAG_MASK;
713}
714
715static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
716 struct buffer_page *head,
717 struct buffer_page *prev,
718 int old_flag)
719{
720 return rb_head_page_set(cpu_buffer, head, prev,
721 old_flag, RB_PAGE_UPDATE);
722}
723
724static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
725 struct buffer_page *head,
726 struct buffer_page *prev,
727 int old_flag)
728{
729 return rb_head_page_set(cpu_buffer, head, prev,
730 old_flag, RB_PAGE_HEAD);
731}
732
733static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
734 struct buffer_page *head,
735 struct buffer_page *prev,
736 int old_flag)
737{
738 return rb_head_page_set(cpu_buffer, head, prev,
739 old_flag, RB_PAGE_NORMAL);
740}
741
742static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
743 struct buffer_page **bpage)
744{
745 struct list_head *p = rb_list_head((*bpage)->list.next);
746
747 *bpage = list_entry(p, struct buffer_page, list);
748}
749
750static struct buffer_page *
751rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
752{
753 struct buffer_page *head;
754 struct buffer_page *page;
755 struct list_head *list;
756 int i;
757
758 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
759 return NULL;
760
761 /* sanity check */
762 list = cpu_buffer->pages;
763 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
764 return NULL;
765
766 page = head = cpu_buffer->head_page;
767 /*
 768 * It is possible that the writer moves the head page behind
 769 * where we started, and we miss it in one loop.
 770 * A second loop should grab the head page, but we'll do
 771 * three loops just because I'm paranoid.
772 */
773 for (i = 0; i < 3; i++) {
774 do {
775 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
776 cpu_buffer->head_page = page;
777 return page;
778 }
779 rb_inc_page(cpu_buffer, &page);
780 } while (page != head);
781 }
782
783 RB_WARN_ON(cpu_buffer, 1);
784
785 return NULL;
786}
787
788static int rb_head_page_replace(struct buffer_page *old,
789 struct buffer_page *new)
790{
791 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
792 unsigned long val;
793 unsigned long ret;
794
795 val = *ptr & ~RB_FLAG_MASK;
796 val |= RB_PAGE_HEAD;
797
Steven Rostedt08a40812009-09-14 09:31:35 -0400798 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
Steven Rostedt77ae3652009-03-27 11:00:29 -0400799
800 return ret == val;
801}
802
803/*
804 * rb_tail_page_update - move the tail page forward
805 *
 806 * Returns 1 if it moved the tail page, 0 if someone else did.
807 */
808static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
809 struct buffer_page *tail_page,
810 struct buffer_page *next_page)
811{
812 struct buffer_page *old_tail;
813 unsigned long old_entries;
814 unsigned long old_write;
815 int ret = 0;
816
817 /*
818 * The tail page now needs to be moved forward.
819 *
820 * We need to reset the tail page, but without messing
821 * with possible erasing of data brought in by interrupts
822 * that have moved the tail page and are currently on it.
823 *
824 * We add a counter to the write field to denote this.
825 */
826 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
827 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
828
829 /*
830 * Just make sure we have seen our old_write and synchronize
831 * with any interrupts that come in.
832 */
833 barrier();
834
835 /*
836 * If the tail page is still the same as what we think
837 * it is, then it is up to us to update the tail
838 * pointer.
839 */
840 if (tail_page == cpu_buffer->tail_page) {
841 /* Zero the write counter */
842 unsigned long val = old_write & ~RB_WRITE_MASK;
843 unsigned long eval = old_entries & ~RB_WRITE_MASK;
844
845 /*
846 * This will only succeed if an interrupt did
847 * not come in and change it. In which case, we
848 * do not want to modify it.
Lai Jiangshanda706d82009-07-15 16:27:30 +0800849 *
850 * We add (void) to let the compiler know that we do not care
851 * about the return value of these functions. We use the
852 * cmpxchg to only update if an interrupt did not already
853 * do it for us. If the cmpxchg fails, we don't care.
Steven Rostedt77ae3652009-03-27 11:00:29 -0400854 */
Lai Jiangshanda706d82009-07-15 16:27:30 +0800855 (void)local_cmpxchg(&next_page->write, old_write, val);
856 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
Steven Rostedt77ae3652009-03-27 11:00:29 -0400857
858 /*
859 * No need to worry about races with clearing out the commit.
 860 * It can only increment when a commit takes place. But that
 861 * only happens in the outermost nested commit.
862 */
863 local_set(&next_page->page->commit, 0);
864
865 old_tail = cmpxchg(&cpu_buffer->tail_page,
866 tail_page, next_page);
867
868 if (old_tail == tail_page)
869 ret = 1;
870 }
871
872 return ret;
873}
874
875static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
876 struct buffer_page *bpage)
877{
878 unsigned long val = (unsigned long)bpage;
879
880 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
881 return 1;
882
883 return 0;
884}
885
886/**
887 * rb_check_list - make sure a pointer to a list has the last bits zero
888 */
889static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
890 struct list_head *list)
891{
892 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
893 return 1;
894 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
895 return 1;
896 return 0;
897}
898
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400899/**
900 * check_pages - integrity check of buffer pages
901 * @cpu_buffer: CPU buffer with pages to test
902 *
Wenji Huangc3706f02009-02-10 01:03:18 -0500903 * As a safety measure we check to make sure the data pages have not
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400904 * been corrupted.
905 */
906static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
907{
Steven Rostedt3adc54f2009-03-30 15:32:01 -0400908 struct list_head *head = cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500909 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400910
Steven Rostedt77ae3652009-03-27 11:00:29 -0400911 rb_head_page_deactivate(cpu_buffer);
912
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500913 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
914 return -1;
915 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
916 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400917
Steven Rostedt77ae3652009-03-27 11:00:29 -0400918 if (rb_check_list(cpu_buffer, head))
919 return -1;
920
Steven Rostedt044fa782008-12-02 23:50:03 -0500921 list_for_each_entry_safe(bpage, tmp, head, list) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500922 if (RB_WARN_ON(cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500923 bpage->list.next->prev != &bpage->list))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500924 return -1;
925 if (RB_WARN_ON(cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500926 bpage->list.prev->next != &bpage->list))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500927 return -1;
Steven Rostedt77ae3652009-03-27 11:00:29 -0400928 if (rb_check_list(cpu_buffer, &bpage->list))
929 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400930 }
931
Steven Rostedt77ae3652009-03-27 11:00:29 -0400932 rb_head_page_activate(cpu_buffer);
933
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400934 return 0;
935}
936
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400937static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
938 unsigned nr_pages)
939{
Steven Rostedt044fa782008-12-02 23:50:03 -0500940 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400941 unsigned long addr;
942 LIST_HEAD(pages);
943 unsigned i;
944
Steven Rostedt3adc54f2009-03-30 15:32:01 -0400945 WARN_ON(!nr_pages);
946
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400947 for (i = 0; i < nr_pages; i++) {
Steven Rostedt044fa782008-12-02 23:50:03 -0500948 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
Steven Rostedtaa1e0e32008-10-02 19:18:09 -0400949 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -0500950 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400951 goto free_pages;
Steven Rostedt77ae3652009-03-27 11:00:29 -0400952
953 rb_check_bpage(cpu_buffer, bpage);
954
Steven Rostedt044fa782008-12-02 23:50:03 -0500955 list_add(&bpage->list, &pages);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400956
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400957 addr = __get_free_page(GFP_KERNEL);
958 if (!addr)
959 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500960 bpage->page = (void *)addr;
961 rb_init_page(bpage->page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400962 }
963
Steven Rostedt3adc54f2009-03-30 15:32:01 -0400964 /*
965 * The ring buffer page list is a circular list that does not
966 * start and end with a list head. All page list items point to
967 * other pages.
968 */
969 cpu_buffer->pages = pages.next;
970 list_del(&pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400971
972 rb_check_pages(cpu_buffer);
973
974 return 0;
975
976 free_pages:
Steven Rostedt044fa782008-12-02 23:50:03 -0500977 list_for_each_entry_safe(bpage, tmp, &pages, list) {
978 list_del_init(&bpage->list);
979 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400980 }
981 return -ENOMEM;
982}
983
984static struct ring_buffer_per_cpu *
985rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
986{
987 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt044fa782008-12-02 23:50:03 -0500988 struct buffer_page *bpage;
Steven Rostedtd7690412008-10-01 00:29:53 -0400989 unsigned long addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400990 int ret;
991
992 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
993 GFP_KERNEL, cpu_to_node(cpu));
994 if (!cpu_buffer)
995 return NULL;
996
997 cpu_buffer->cpu = cpu;
998 cpu_buffer->buffer = buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +0100999 spin_lock_init(&cpu_buffer->reader_lock);
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +02001000 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001001 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001002
Steven Rostedt044fa782008-12-02 23:50:03 -05001003 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
Steven Rostedte4c2ce82008-10-01 11:14:54 -04001004 GFP_KERNEL, cpu_to_node(cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -05001005 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -04001006 goto fail_free_buffer;
1007
Steven Rostedt77ae3652009-03-27 11:00:29 -04001008 rb_check_bpage(cpu_buffer, bpage);
1009
Steven Rostedt044fa782008-12-02 23:50:03 -05001010 cpu_buffer->reader_page = bpage;
Steven Rostedtd7690412008-10-01 00:29:53 -04001011 addr = __get_free_page(GFP_KERNEL);
1012 if (!addr)
Steven Rostedte4c2ce82008-10-01 11:14:54 -04001013 goto fail_free_reader;
Steven Rostedt044fa782008-12-02 23:50:03 -05001014 bpage->page = (void *)addr;
1015 rb_init_page(bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -04001016
Steven Rostedtd7690412008-10-01 00:29:53 -04001017 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
Steven Rostedtd7690412008-10-01 00:29:53 -04001018
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001019 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1020 if (ret < 0)
Steven Rostedtd7690412008-10-01 00:29:53 -04001021 goto fail_free_reader;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001022
1023 cpu_buffer->head_page
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001024 = list_entry(cpu_buffer->pages, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001025 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001026
Steven Rostedt77ae3652009-03-27 11:00:29 -04001027 rb_head_page_activate(cpu_buffer);
1028
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001029 return cpu_buffer;
1030
Steven Rostedtd7690412008-10-01 00:29:53 -04001031 fail_free_reader:
1032 free_buffer_page(cpu_buffer->reader_page);
1033
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001034 fail_free_buffer:
1035 kfree(cpu_buffer);
1036 return NULL;
1037}
1038
1039static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1040{
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001041 struct list_head *head = cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -05001042 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001043
Steven Rostedtd7690412008-10-01 00:29:53 -04001044 free_buffer_page(cpu_buffer->reader_page);
1045
Steven Rostedt77ae3652009-03-27 11:00:29 -04001046 rb_head_page_deactivate(cpu_buffer);
1047
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001048 if (head) {
1049 list_for_each_entry_safe(bpage, tmp, head, list) {
1050 list_del_init(&bpage->list);
1051 free_buffer_page(bpage);
1052 }
1053 bpage = list_entry(head, struct buffer_page, list);
Steven Rostedt044fa782008-12-02 23:50:03 -05001054 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001055 }
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001056
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001057 kfree(cpu_buffer);
1058}
1059
Steven Rostedt59222ef2009-03-12 11:46:03 -04001060#ifdef CONFIG_HOTPLUG_CPU
Frederic Weisbecker09c9e842009-03-21 04:33:36 +01001061static int rb_cpu_notify(struct notifier_block *self,
1062 unsigned long action, void *hcpu);
Steven Rostedt554f7862009-03-11 22:00:13 -04001063#endif
1064
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001065/**
1066 * ring_buffer_alloc - allocate a new ring_buffer
Robert Richter68814b52008-11-24 12:24:12 +01001067 * @size: the size in bytes per cpu that is needed.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001068 * @flags: attributes to set for the ring buffer.
1069 *
1070 * Currently the only flag that is available is the RB_FL_OVERWRITE
1071 * flag. This flag means that the buffer will overwrite old data
1072 * when the buffer wraps. If this flag is not set, the buffer will
1073 * drop data when the tail hits the head.
1074 */
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +02001075struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1076 struct lock_class_key *key)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001077{
1078 struct ring_buffer *buffer;
1079 int bsize;
1080 int cpu;
1081
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001082 /* keep it in its own cache line */
1083 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1084 GFP_KERNEL);
1085 if (!buffer)
1086 return NULL;
1087
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301088 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1089 goto fail_free_buffer;
1090
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001091 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1092 buffer->flags = flags;
Steven Rostedt37886f62009-03-17 17:22:06 -04001093 buffer->clock = trace_clock_local;
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +02001094 buffer->reader_lock_key = key;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001095
1096 /* need at least two pages */
Steven Rostedt5f78abe2009-06-17 14:11:10 -04001097 if (buffer->pages < 2)
1098 buffer->pages = 2;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001099
Frederic Weisbecker3bf832c2009-03-19 14:47:33 +01001100 /*
 1101 * In the non-hotplug-CPU case, if the ring buffer is allocated
 1102 * in an early initcall, it will not be notified of secondary cpus.
 1103 * In that case, we need to allocate for all possible cpus.
1104 */
1105#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -04001106 get_online_cpus();
1107 cpumask_copy(buffer->cpumask, cpu_online_mask);
Frederic Weisbecker3bf832c2009-03-19 14:47:33 +01001108#else
1109 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1110#endif
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001111 buffer->cpus = nr_cpu_ids;
1112
1113 bsize = sizeof(void *) * nr_cpu_ids;
1114 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1115 GFP_KERNEL);
1116 if (!buffer->buffers)
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301117 goto fail_free_cpumask;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001118
1119 for_each_buffer_cpu(buffer, cpu) {
1120 buffer->buffers[cpu] =
1121 rb_allocate_cpu_buffer(buffer, cpu);
1122 if (!buffer->buffers[cpu])
1123 goto fail_free_buffers;
1124 }
1125
Steven Rostedt59222ef2009-03-12 11:46:03 -04001126#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -04001127 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1128 buffer->cpu_notify.priority = 0;
1129 register_cpu_notifier(&buffer->cpu_notify);
1130#endif
1131
1132 put_online_cpus();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001133 mutex_init(&buffer->mutex);
1134
1135 return buffer;
1136
1137 fail_free_buffers:
1138 for_each_buffer_cpu(buffer, cpu) {
1139 if (buffer->buffers[cpu])
1140 rb_free_cpu_buffer(buffer->buffers[cpu]);
1141 }
1142 kfree(buffer->buffers);
1143
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301144 fail_free_cpumask:
1145 free_cpumask_var(buffer->cpumask);
Steven Rostedt554f7862009-03-11 22:00:13 -04001146 put_online_cpus();
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301147
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001148 fail_free_buffer:
1149 kfree(buffer);
1150 return NULL;
1151}
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +02001152EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
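/*
 * Illustrative usage (not part of this file): callers normally go through
 * the ring_buffer_alloc() wrapper macro from <linux/ring_buffer.h>, which
 * supplies the lock_class_key for lockdep:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);	// ~1MB per cpu
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */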
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001153
1154/**
1155 * ring_buffer_free - free a ring buffer.
1156 * @buffer: the buffer to free.
1157 */
1158void
1159ring_buffer_free(struct ring_buffer *buffer)
1160{
1161 int cpu;
1162
Steven Rostedt554f7862009-03-11 22:00:13 -04001163 get_online_cpus();
1164
Steven Rostedt59222ef2009-03-12 11:46:03 -04001165#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -04001166 unregister_cpu_notifier(&buffer->cpu_notify);
1167#endif
1168
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001169 for_each_buffer_cpu(buffer, cpu)
1170 rb_free_cpu_buffer(buffer->buffers[cpu]);
1171
Steven Rostedt554f7862009-03-11 22:00:13 -04001172 put_online_cpus();
1173
Eric Dumazetbd3f0222009-08-07 12:49:29 +02001174 kfree(buffer->buffers);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301175 free_cpumask_var(buffer->cpumask);
1176
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001177 kfree(buffer);
1178}
Robert Richterc4f50182008-12-11 16:49:22 +01001179EXPORT_SYMBOL_GPL(ring_buffer_free);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001180
Steven Rostedt37886f62009-03-17 17:22:06 -04001181void ring_buffer_set_clock(struct ring_buffer *buffer,
1182 u64 (*clock)(void))
1183{
1184 buffer->clock = clock;
1185}
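/*
 * Illustrative usage (not part of this file): a tracer that wants a
 * different time source can override the default trace_clock_local,
 * e.g. with the global trace clock from <linux/trace_clock.h>:
 *
 *	ring_buffer_set_clock(rb, trace_clock_global);
 */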
1186
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001187static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1188
1189static void
1190rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1191{
Steven Rostedt044fa782008-12-02 23:50:03 -05001192 struct buffer_page *bpage;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001193 struct list_head *p;
1194 unsigned i;
1195
1196 atomic_inc(&cpu_buffer->record_disabled);
1197 synchronize_sched();
1198
Steven Rostedt77ae3652009-03-27 11:00:29 -04001199 rb_head_page_deactivate(cpu_buffer);
1200
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001201 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001202 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001203 return;
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001204 p = cpu_buffer->pages->next;
Steven Rostedt044fa782008-12-02 23:50:03 -05001205 bpage = list_entry(p, struct buffer_page, list);
1206 list_del_init(&bpage->list);
1207 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001208 }
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001209 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001210 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001211
1212 rb_reset_cpu(cpu_buffer);
1213
1214 rb_check_pages(cpu_buffer);
1215
1216 atomic_dec(&cpu_buffer->record_disabled);
1217
1218}
1219
1220static void
1221rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1222 struct list_head *pages, unsigned nr_pages)
1223{
Steven Rostedt044fa782008-12-02 23:50:03 -05001224 struct buffer_page *bpage;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001225 struct list_head *p;
1226 unsigned i;
1227
1228 atomic_inc(&cpu_buffer->record_disabled);
1229 synchronize_sched();
1230
Steven Rostedt77ae3652009-03-27 11:00:29 -04001231 spin_lock_irq(&cpu_buffer->reader_lock);
1232 rb_head_page_deactivate(cpu_buffer);
1233
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001234 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001235 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1236 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001237 p = pages->next;
Steven Rostedt044fa782008-12-02 23:50:03 -05001238 bpage = list_entry(p, struct buffer_page, list);
1239 list_del_init(&bpage->list);
Steven Rostedt3adc54f2009-03-30 15:32:01 -04001240 list_add_tail(&bpage->list, cpu_buffer->pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001241 }
1242 rb_reset_cpu(cpu_buffer);
Steven Rostedt77ae3652009-03-27 11:00:29 -04001243 spin_unlock_irq(&cpu_buffer->reader_lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001244
1245 rb_check_pages(cpu_buffer);
1246
1247 atomic_dec(&cpu_buffer->record_disabled);
1248}
1249
1250/**
1251 * ring_buffer_resize - resize the ring buffer
1252 * @buffer: the buffer to resize.
1253 * @size: the new size.
1254 *
1255 * The tracer is responsible for making sure that the buffer is
1256 * not being used while changing the size.
1257 * Note: We may be able to change the above requirement by using
1258 * RCU synchronizations.
1259 *
1260 * Minimum size is 2 * BUF_PAGE_SIZE.
1261 *
1262 * Returns -1 on failure.
1263 */
1264int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1265{
1266 struct ring_buffer_per_cpu *cpu_buffer;
1267 unsigned nr_pages, rm_pages, new_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -05001268 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001269 unsigned long buffer_size;
1270 unsigned long addr;
1271 LIST_HEAD(pages);
1272 int i, cpu;
1273
Ingo Molnaree51a1d2008-11-13 14:58:31 +01001274 /*
1275 * Always succeed at resizing a non-existent buffer:
1276 */
1277 if (!buffer)
1278 return size;
1279
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001280 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1281 size *= BUF_PAGE_SIZE;
1282 buffer_size = buffer->pages * BUF_PAGE_SIZE;
1283
1284 /* we need a minimum of two pages */
1285 if (size < BUF_PAGE_SIZE * 2)
1286 size = BUF_PAGE_SIZE * 2;
1287
1288 if (size == buffer_size)
1289 return size;
1290
1291 mutex_lock(&buffer->mutex);
Steven Rostedt554f7862009-03-11 22:00:13 -04001292 get_online_cpus();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001293
1294 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1295
1296 if (size < buffer_size) {
1297
1298 /* easy case, just free pages */
Steven Rostedt554f7862009-03-11 22:00:13 -04001299 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1300 goto out_fail;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001301
1302 rm_pages = buffer->pages - nr_pages;
1303
1304 for_each_buffer_cpu(buffer, cpu) {
1305 cpu_buffer = buffer->buffers[cpu];
1306 rb_remove_pages(cpu_buffer, rm_pages);
1307 }
1308 goto out;
1309 }
1310
1311 /*
1312 * This is a bit more difficult. We only want to add pages
1313 * when we can allocate enough for all CPUs. We do this
1314 * by allocating all the pages and storing them on a local
1315 * link list. If we succeed in our allocation, then we
1316 * add these pages to the cpu_buffers. Otherwise we just free
1317 * them all and return -ENOMEM;
1318 */
Steven Rostedt554f7862009-03-11 22:00:13 -04001319 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1320 goto out_fail;
Steven Rostedtf536aaf2008-11-10 23:07:30 -05001321
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001322 new_pages = nr_pages - buffer->pages;
1323
1324 for_each_buffer_cpu(buffer, cpu) {
1325 for (i = 0; i < new_pages; i++) {
Steven Rostedt044fa782008-12-02 23:50:03 -05001326 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
Steven Rostedte4c2ce82008-10-01 11:14:54 -04001327 cache_line_size()),
1328 GFP_KERNEL, cpu_to_node(cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -05001329 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -04001330 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -05001331 list_add(&bpage->list, &pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001332 addr = __get_free_page(GFP_KERNEL);
1333 if (!addr)
1334 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -05001335 bpage->page = (void *)addr;
1336 rb_init_page(bpage->page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001337 }
1338 }
1339
1340 for_each_buffer_cpu(buffer, cpu) {
1341 cpu_buffer = buffer->buffers[cpu];
1342 rb_insert_pages(cpu_buffer, &pages, new_pages);
1343 }
1344
Steven Rostedt554f7862009-03-11 22:00:13 -04001345 if (RB_WARN_ON(buffer, !list_empty(&pages)))
1346 goto out_fail;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001347
1348 out:
1349 buffer->pages = nr_pages;
Steven Rostedt554f7862009-03-11 22:00:13 -04001350 put_online_cpus();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001351 mutex_unlock(&buffer->mutex);
1352
1353 return size;
1354
1355 free_pages:
Steven Rostedt044fa782008-12-02 23:50:03 -05001356 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1357 list_del_init(&bpage->list);
1358 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001359 }
Steven Rostedt554f7862009-03-11 22:00:13 -04001360 put_online_cpus();
Vegard Nossum641d2f62008-11-18 19:22:13 +01001361 mutex_unlock(&buffer->mutex);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001362 return -ENOMEM;
Steven Rostedt554f7862009-03-11 22:00:13 -04001363
1364 /*
1365 * Something went totally wrong, and we are too paranoid
1366 * to even clean up the mess.
1367 */
1368 out_fail:
1369 put_online_cpus();
1370 mutex_unlock(&buffer->mutex);
1371 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001372}
Robert Richterc4f50182008-12-11 16:49:22 +01001373EXPORT_SYMBOL_GPL(ring_buffer_resize);
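/*
 * Illustrative usage (not part of this file): resizing rounds the
 * request up to whole pages and enforces the two page minimum, so a
 * caller can simply hand in a byte count:
 *
 *	int ret = ring_buffer_resize(rb, 512 * 1024);	// per cpu bytes
 *	if (ret < 0)
 *		pr_warning("ring buffer resize failed\n");
 */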
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001374
Steven Rostedt8789a9e2008-12-02 15:34:07 -05001375static inline void *
Steven Rostedt044fa782008-12-02 23:50:03 -05001376__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05001377{
Steven Rostedt044fa782008-12-02 23:50:03 -05001378 return bpage->data + index;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05001379}
1380
Steven Rostedt044fa782008-12-02 23:50:03 -05001381static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001382{
Steven Rostedt044fa782008-12-02 23:50:03 -05001383 return bpage->page->data + index;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001384}
1385
1386static inline struct ring_buffer_event *
Steven Rostedtd7690412008-10-01 00:29:53 -04001387rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001388{
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001389 return __rb_page_index(cpu_buffer->reader_page,
1390 cpu_buffer->reader_page->read);
1391}
1392
1393static inline struct ring_buffer_event *
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001394rb_iter_head_event(struct ring_buffer_iter *iter)
1395{
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001396 return __rb_page_index(iter->head_page, iter->head);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001397}
1398
Steven Rostedt77ae3652009-03-27 11:00:29 -04001399static inline unsigned long rb_page_write(struct buffer_page *bpage)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001400{
Steven Rostedt77ae3652009-03-27 11:00:29 -04001401 return local_read(&bpage->write) & RB_WRITE_MASK;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001402}
1403
1404static inline unsigned rb_page_commit(struct buffer_page *bpage)
1405{
Steven Rostedtabc9b562008-12-02 15:34:06 -05001406 return local_read(&bpage->page->commit);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001407}
1408
Steven Rostedt77ae3652009-03-27 11:00:29 -04001409static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1410{
1411 return local_read(&bpage->entries) & RB_WRITE_MASK;
1412}
1413
Steven Rostedtbf41a152008-10-04 02:00:59 -04001414/* Size is determined by what has been committed */
1415static inline unsigned rb_page_size(struct buffer_page *bpage)
1416{
1417 return rb_page_commit(bpage);
1418}
1419
1420static inline unsigned
1421rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1422{
1423 return rb_page_commit(cpu_buffer->commit_page);
1424}
1425
Steven Rostedtbf41a152008-10-04 02:00:59 -04001426static inline unsigned
1427rb_event_index(struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001428{
Steven Rostedtbf41a152008-10-04 02:00:59 -04001429 unsigned long addr = (unsigned long)event;
1430
Steven Rostedt22f470f2009-06-11 09:29:58 -04001431 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001432}
1433
Steven Rostedt0f0c85f2009-05-11 16:08:00 -04001434static inline int
Steven Rostedtfa743952009-06-16 12:37:57 -04001435rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1436 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001437{
Steven Rostedtbf41a152008-10-04 02:00:59 -04001438 unsigned long addr = (unsigned long)event;
1439 unsigned long index;
1440
1441 index = rb_event_index(event);
1442 addr &= PAGE_MASK;
1443
1444 return cpu_buffer->commit_page->page == (void *)addr &&
1445 rb_commit_index(cpu_buffer) == index;
1446}
1447
Andrew Morton34a148b2009-01-09 12:27:09 -08001448static void
Steven Rostedtbf41a152008-10-04 02:00:59 -04001449rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1450{
Steven Rostedt77ae3652009-03-27 11:00:29 -04001451 unsigned long max_count;
1452
Steven Rostedtbf41a152008-10-04 02:00:59 -04001453 /*
1454 * We only race with interrupts and NMIs on this CPU.
1455 * If we own the commit event, then we can commit
1456 * all others that interrupted us, since the interruptions
1457 * are in stack format (they finish before they come
1458 * back to us). This allows us to do a simple loop to
1459 * assign the commit to the tail.
1460 */
Steven Rostedta8ccf1d2008-12-23 11:32:24 -05001461 again:
Steven Rostedt77ae3652009-03-27 11:00:29 -04001462 max_count = cpu_buffer->buffer->pages * 100;
1463
Steven Rostedtbf41a152008-10-04 02:00:59 -04001464 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
Steven Rostedt77ae3652009-03-27 11:00:29 -04001465 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1466 return;
1467 if (RB_WARN_ON(cpu_buffer,
1468 rb_is_reader_page(cpu_buffer->tail_page)))
1469 return;
1470 local_set(&cpu_buffer->commit_page->page->commit,
1471 rb_page_write(cpu_buffer->commit_page));
Steven Rostedtbf41a152008-10-04 02:00:59 -04001472 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001473 cpu_buffer->write_stamp =
1474 cpu_buffer->commit_page->page->time_stamp;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001475 /* add barrier to keep gcc from optimizing too much */
1476 barrier();
1477 }
1478 while (rb_commit_index(cpu_buffer) !=
1479 rb_page_write(cpu_buffer->commit_page)) {
Steven Rostedt77ae3652009-03-27 11:00:29 -04001480
1481 local_set(&cpu_buffer->commit_page->page->commit,
1482 rb_page_write(cpu_buffer->commit_page));
1483 RB_WARN_ON(cpu_buffer,
1484 local_read(&cpu_buffer->commit_page->page->commit) &
1485 ~RB_WRITE_MASK);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001486 barrier();
1487 }
Steven Rostedta8ccf1d2008-12-23 11:32:24 -05001488
1489 /* again, keep gcc from optimizing */
1490 barrier();
1491
1492 /*
1493 * If an interrupt came in just after the first while loop
1494 * and pushed the tail page forward, we will be left with
1495 * a dangling commit that will never go forward.
1496 */
1497 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1498 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001499}
1500
Steven Rostedtd7690412008-10-01 00:29:53 -04001501static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001502{
Steven Rostedtabc9b562008-12-02 15:34:06 -05001503 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001504 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001505}
1506
Andrew Morton34a148b2009-01-09 12:27:09 -08001507static void rb_inc_iter(struct ring_buffer_iter *iter)
Steven Rostedtd7690412008-10-01 00:29:53 -04001508{
1509 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1510
1511 /*
1512 * The iterator could be on the reader page (it starts there).
1513 * But the head could have moved, since the reader was
1514 * found. Check for this case and assign the iterator
1515 * to the head page instead of next.
1516 */
1517 if (iter->head_page == cpu_buffer->reader_page)
Steven Rostedt77ae3652009-03-27 11:00:29 -04001518 iter->head_page = rb_set_head_page(cpu_buffer);
Steven Rostedtd7690412008-10-01 00:29:53 -04001519 else
1520 rb_inc_page(cpu_buffer, &iter->head_page);
1521
Steven Rostedtabc9b562008-12-02 15:34:06 -05001522 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001523 iter->head = 0;
1524}
1525
1526/**
1527 * ring_buffer_update_event - update event type and data
1528 * @event: the event to update
1529 * @type: the type of event
1530 * @length: the size of the event field in the ring buffer
1531 *
1532 * Update the type and data fields of the event. The length
1533 * is the actual size that is written to the ring buffer,
1534 * and with this, we can determine what to place into the
1535 * data field.
1536 */
Andrew Morton34a148b2009-01-09 12:27:09 -08001537static void
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001538rb_update_event(struct ring_buffer_event *event,
1539 unsigned type, unsigned length)
1540{
Lai Jiangshan334d4162009-04-24 11:27:05 +08001541 event->type_len = type;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001542
1543 switch (type) {
1544
1545 case RINGBUF_TYPE_PADDING:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001546 case RINGBUF_TYPE_TIME_EXTEND:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001547 case RINGBUF_TYPE_TIME_STAMP:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001548 break;
1549
Lai Jiangshan334d4162009-04-24 11:27:05 +08001550 case 0:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001551 length -= RB_EVNT_HDR_SIZE;
Lai Jiangshan334d4162009-04-24 11:27:05 +08001552 if (length > RB_MAX_SMALL_DATA)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001553 event->array[0] = length;
Lai Jiangshan334d4162009-04-24 11:27:05 +08001554 else
1555 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001556 break;
1557 default:
1558 BUG();
1559 }
1560}
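/*
 * Illustrative note (added commentary, not from the original source):
 * with the compressed header described at the top of this file, a data
 * event whose payload fits in RB_MAX_SMALL_DATA carries its size in
 * type_len directly.  Assuming RB_ALIGNMENT is 4 bytes, a 12 byte data
 * portion gets type_len = DIV_ROUND_UP(12, 4) = 3 and no array[0] word,
 * while a larger payload keeps type_len = 0 and stores the byte count
 * in array[0], as the case 0 branch above does.
 */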
1561
Steven Rostedt77ae3652009-03-27 11:00:29 -04001562/*
1563 * rb_handle_head_page - writer hit the head page
1564 *
1565 * Returns: +1 to retry page
1566 * 0 to continue
1567 * -1 on error
1568 */
1569static int
1570rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1571 struct buffer_page *tail_page,
1572 struct buffer_page *next_page)
1573{
1574 struct buffer_page *new_head;
1575 int entries;
1576 int type;
1577 int ret;
1578
1579 entries = rb_page_entries(next_page);
1580
1581 /*
1582 * The hard part is here. We need to move the head
1583 * forward, and protect against both readers on
1584 * other CPUs and writers coming in via interrupts.
1585 */
1586 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1587 RB_PAGE_HEAD);
1588
1589 /*
1590 * type can be one of four:
1591 * NORMAL - an interrupt already moved it for us
1592 * HEAD - we are the first to get here.
1593 * UPDATE - we are the interrupt interrupting
1594 * a current move.
1595 * MOVED - a reader on another CPU moved the next
1596 * pointer to its reader page. Give up
1597 * and try again.
1598 */
1599
1600 switch (type) {
1601 case RB_PAGE_HEAD:
1602 /*
1603 * We changed the head to UPDATE, thus
1604 * it is our responsibility to update
1605 * the counters.
1606 */
1607 local_add(entries, &cpu_buffer->overrun);
1608
1609 /*
1610 * The entries will be zeroed out when we move the
1611 * tail page.
1612 */
1613
1614 /* still more to do */
1615 break;
1616
1617 case RB_PAGE_UPDATE:
1618 /*
1619 * This is an interrupt that interrupted the
1620 * previous update. Still more to do.
1621 */
1622 break;
1623 case RB_PAGE_NORMAL:
1624 /*
1625 * An interrupt came in before the update
1626 * and processed this for us.
1627 * Nothing left to do.
1628 */
1629 return 1;
1630 case RB_PAGE_MOVED:
1631 /*
1632 * The reader is on another CPU and just did
1633 * a swap with our next_page.
1634 * Try again.
1635 */
1636 return 1;
1637 default:
1638 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1639 return -1;
1640 }
1641
1642 /*
1643 * Now that we are here, the old head pointer is
1644 * set to UPDATE. This will keep the reader from
1645 * swapping the head page with the reader page.
1646 * The reader (on another CPU) will spin till
1647 * we are finished.
1648 *
1649 * We just need to protect against interrupts
1650 * doing the job. We will set the next pointer
1651 * to HEAD. After that, we set the old pointer
1652 * to NORMAL, but only if it was HEAD before.
1653 * Otherwise we are an interrupt, and only
1654 * want the outermost commit to reset it.
1655 */
1656 new_head = next_page;
1657 rb_inc_page(cpu_buffer, &new_head);
1658
1659 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1660 RB_PAGE_NORMAL);
1661
1662 /*
1663 * Valid returns are:
1664 * HEAD - an interrupt came in and already set it.
1665 * NORMAL - One of two things:
1666 * 1) We really set it.
1667 * 2) A bunch of interrupts came in and moved
1668 * the page forward again.
1669 */
1670 switch (ret) {
1671 case RB_PAGE_HEAD:
1672 case RB_PAGE_NORMAL:
1673 /* OK */
1674 break;
1675 default:
1676 RB_WARN_ON(cpu_buffer, 1);
1677 return -1;
1678 }
1679
1680 /*
1681 * It is possible that an interrupt came in,
1682 * set the head up, then more interrupts came in
1683 * and moved it again. When we get back here,
1684 * the page would have been set to NORMAL but we
1685 * just set it back to HEAD.
1686 *
1687 * How do you detect this? Well, if that happened
1688 * the tail page would have moved.
1689 */
1690 if (ret == RB_PAGE_NORMAL) {
1691 /*
1692 * If the tail had moved past next, then we need
1693 * to reset the pointer.
1694 */
1695 if (cpu_buffer->tail_page != tail_page &&
1696 cpu_buffer->tail_page != next_page)
1697 rb_head_page_set_normal(cpu_buffer, new_head,
1698 next_page,
1699 RB_PAGE_HEAD);
1700 }
1701
1702 /*
1703 * If this was the outermost commit (the one that
1704 * changed the original pointer from HEAD to UPDATE),
1705 * then it is up to us to reset it to NORMAL.
1706 */
1707 if (type == RB_PAGE_HEAD) {
1708 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1709 tail_page,
1710 RB_PAGE_UPDATE);
1711 if (RB_WARN_ON(cpu_buffer,
1712 ret != RB_PAGE_UPDATE))
1713 return -1;
1714 }
1715
1716 return 0;
1717}
1718
Andrew Morton34a148b2009-01-09 12:27:09 -08001719static unsigned rb_calculate_event_length(unsigned length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001720{
1721 struct ring_buffer_event event; /* Used only for sizeof array */
1722
1723 /* zero length can cause confusion */
1724 if (!length)
1725 length = 1;
1726
1727 if (length > RB_MAX_SMALL_DATA)
1728 length += sizeof(event.array[0]);
1729
1730 length += RB_EVNT_HDR_SIZE;
1731 length = ALIGN(length, RB_ALIGNMENT);
1732
1733 return length;
1734}
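/*
 * Worked example (illustrative only, assuming RB_EVNT_HDR_SIZE,
 * RB_ALIGNMENT and the array[0] element are each 4 bytes, as the
 * compressed header layout implies): a request for 10 bytes of data is
 * below RB_MAX_SMALL_DATA, so only the header is added: 10 + 4 = 14,
 * aligned up to 16 bytes.  A 200 byte request exceeds
 * RB_MAX_SMALL_DATA, so the extra array[0] length word is added as
 * well: 200 + 4 + 4 = 208, which is already aligned.
 */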
1735
Steven Rostedtc7b09302009-06-11 11:12:00 -04001736static inline void
1737rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1738 struct buffer_page *tail_page,
1739 unsigned long tail, unsigned long length)
1740{
1741 struct ring_buffer_event *event;
1742
1743 /*
1744 * Only the event that crossed the page boundary
1745 * must fill the old tail_page with padding.
1746 */
1747 if (tail >= BUF_PAGE_SIZE) {
1748 local_sub(length, &tail_page->write);
1749 return;
1750 }
1751
1752 event = __rb_page_index(tail_page, tail);
Linus Torvaldsb0b70652009-06-20 10:56:46 -07001753 kmemcheck_annotate_bitfield(event, bitfield);
Steven Rostedtc7b09302009-06-11 11:12:00 -04001754
1755 /*
1756 * If this event is bigger than the minimum size, then
1757 * we need to be careful that we don't subtract the
1758 * write counter enough to allow another writer to slip
1759 * in on this page.
1760 * We put in a discarded commit instead, to make sure
1761 * that this space is not used again.
1762 *
1763 * If we are less than the minimum size, we don't need to
1764 * worry about it.
1765 */
1766 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1767 /* No room for any events */
1768
1769 /* Mark the rest of the page with padding */
1770 rb_event_set_padding(event);
1771
1772 /* Set the write back to the previous setting */
1773 local_sub(length, &tail_page->write);
1774 return;
1775 }
1776
1777 /* Put in a discarded event */
1778 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1779 event->type_len = RINGBUF_TYPE_PADDING;
1780 /* time delta must be non zero */
1781 event->time_delta = 1;
Steven Rostedtc7b09302009-06-11 11:12:00 -04001782
1783 /* Set write to end of buffer */
1784 length = (tail + length) - BUF_PAGE_SIZE;
1785 local_sub(length, &tail_page->write);
1786}
Steven Rostedt6634ff22009-05-06 15:30:07 -04001787
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001788static struct ring_buffer_event *
Steven Rostedt6634ff22009-05-06 15:30:07 -04001789rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1790 unsigned long length, unsigned long tail,
1791 struct buffer_page *commit_page,
1792 struct buffer_page *tail_page, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001793{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001794 struct ring_buffer *buffer = cpu_buffer->buffer;
Steven Rostedt77ae3652009-03-27 11:00:29 -04001795 struct buffer_page *next_page;
1796 int ret;
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001797
1798 next_page = tail_page;
1799
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001800 rb_inc_page(cpu_buffer, &next_page);
1801
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001802 /*
1803 * If for some reason, we had an interrupt storm that made
1804 * it all the way around the buffer, bail, and warn
1805 * about it.
1806 */
1807 if (unlikely(next_page == commit_page)) {
Steven Rostedt77ae3652009-03-27 11:00:29 -04001808 local_inc(&cpu_buffer->commit_overrun);
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001809 goto out_reset;
1810 }
1811
Steven Rostedt77ae3652009-03-27 11:00:29 -04001812 /*
1813 * This is where the fun begins!
1814 *
1815 * We are fighting against races between a reader that
1816 * could be on another CPU trying to swap its reader
1817 * page with the buffer head.
1818 *
1819 * We are also fighting against interrupts coming in and
1820 * moving the head or tail on us as well.
1821 *
1822 * If the next page is the head page then we have filled
1823 * the buffer, unless the commit page is still on the
1824 * reader page.
1825 */
1826 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001827
Steven Rostedt77ae3652009-03-27 11:00:29 -04001828 /*
1829 * If the commit is not on the reader page, then
1830 * move the head page.
1831 */
1832 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1833 /*
1834 * If we are not in overwrite mode,
1835 * this is easy, just stop here.
1836 */
1837 if (!(buffer->flags & RB_FL_OVERWRITE))
1838 goto out_reset;
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001839
Steven Rostedt77ae3652009-03-27 11:00:29 -04001840 ret = rb_handle_head_page(cpu_buffer,
1841 tail_page,
1842 next_page);
1843 if (ret < 0)
1844 goto out_reset;
1845 if (ret)
1846 goto out_again;
1847 } else {
1848 /*
1849 * We need to be careful here too. The
1850 * commit page could still be on the reader
1851 * page. We could have a small buffer, and
1852 * have filled up the buffer with events
1853 * from interrupts and such, and wrapped.
1854 *
1855 * Note, if the tail page is also on the
1856 * reader_page, we let it move out.
1857 */
1858 if (unlikely((cpu_buffer->commit_page !=
1859 cpu_buffer->tail_page) &&
1860 (cpu_buffer->commit_page ==
1861 cpu_buffer->reader_page))) {
1862 local_inc(&cpu_buffer->commit_overrun);
1863 goto out_reset;
1864 }
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001865 }
1866 }
1867
Steven Rostedt77ae3652009-03-27 11:00:29 -04001868 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1869 if (ret) {
1870 /*
1871 * Nested commits always have zero deltas, so
1872 * just reread the time stamp
1873 */
Steven Rostedt88eb0122009-05-11 16:28:23 -04001874 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
Steven Rostedt77ae3652009-03-27 11:00:29 -04001875 next_page->page->time_stamp = *ts;
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001876 }
1877
Steven Rostedt77ae3652009-03-27 11:00:29 -04001878 out_again:
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001879
Steven Rostedt77ae3652009-03-27 11:00:29 -04001880 rb_reset_tail(cpu_buffer, tail_page, tail, length);
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001881
1882 /* fail and let the caller try again */
1883 return ERR_PTR(-EAGAIN);
1884
Steven Rostedt45141d42009-02-12 13:19:48 -05001885 out_reset:
Lai Jiangshan6f3b3442009-01-12 11:06:18 +08001886 /* reset write */
Steven Rostedtc7b09302009-06-11 11:12:00 -04001887 rb_reset_tail(cpu_buffer, tail_page, tail, length);
Lai Jiangshan6f3b3442009-01-12 11:06:18 +08001888
Steven Rostedtbf41a152008-10-04 02:00:59 -04001889 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001890}
1891
Steven Rostedt6634ff22009-05-06 15:30:07 -04001892static struct ring_buffer_event *
1893__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1894 unsigned type, unsigned long length, u64 *ts)
1895{
1896 struct buffer_page *tail_page, *commit_page;
1897 struct ring_buffer_event *event;
1898 unsigned long tail, write;
1899
1900 commit_page = cpu_buffer->commit_page;
1901 /* we just need to protect against interrupts */
1902 barrier();
1903 tail_page = cpu_buffer->tail_page;
1904 write = local_add_return(length, &tail_page->write);
Steven Rostedt77ae3652009-03-27 11:00:29 -04001905
1906 /* set write to only the index of the write */
1907 write &= RB_WRITE_MASK;
Steven Rostedt6634ff22009-05-06 15:30:07 -04001908 tail = write - length;
1909
1910 /* See if we shot past the end of this buffer page */
1911 if (write > BUF_PAGE_SIZE)
1912 return rb_move_tail(cpu_buffer, length, tail,
1913 commit_page, tail_page, ts);
1914
1915 /* We reserved something on the buffer */
1916
Steven Rostedt6634ff22009-05-06 15:30:07 -04001917 event = __rb_page_index(tail_page, tail);
Vegard Nossum1744a212009-02-28 08:29:44 +01001918 kmemcheck_annotate_bitfield(event, bitfield);
Steven Rostedt6634ff22009-05-06 15:30:07 -04001919 rb_update_event(event, type, length);
1920
1921 /* The passed in type is zero for DATA */
1922 if (likely(!type))
1923 local_inc(&tail_page->entries);
1924
1925 /*
Steven Rostedtfa743952009-06-16 12:37:57 -04001926 * If this is the first commit on the page, then update
1927 * its timestamp.
Steven Rostedt6634ff22009-05-06 15:30:07 -04001928 */
Steven Rostedtfa743952009-06-16 12:37:57 -04001929 if (!tail)
1930 tail_page->page->time_stamp = *ts;
Steven Rostedt6634ff22009-05-06 15:30:07 -04001931
1932 return event;
1933}
1934
Steven Rostedtedd813b2009-06-02 23:00:53 -04001935static inline int
1936rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1937 struct ring_buffer_event *event)
1938{
1939 unsigned long new_index, old_index;
1940 struct buffer_page *bpage;
1941 unsigned long index;
1942 unsigned long addr;
1943
1944 new_index = rb_event_index(event);
1945 old_index = new_index + rb_event_length(event);
1946 addr = (unsigned long)event;
1947 addr &= PAGE_MASK;
1948
1949 bpage = cpu_buffer->tail_page;
1950
1951 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
Steven Rostedt77ae3652009-03-27 11:00:29 -04001952 unsigned long write_mask =
1953 local_read(&bpage->write) & ~RB_WRITE_MASK;
Steven Rostedtedd813b2009-06-02 23:00:53 -04001954 /*
1955 * This is on the tail page. It is possible that
1956 * a write could come in and move the tail page
1957 * and write to the next page. That is fine
1958 * because we just shorten what is on this page.
1959 */
Steven Rostedt77ae3652009-03-27 11:00:29 -04001960 old_index += write_mask;
1961 new_index += write_mask;
Steven Rostedtedd813b2009-06-02 23:00:53 -04001962 index = local_cmpxchg(&bpage->write, old_index, new_index);
1963 if (index == old_index)
1964 return 1;
1965 }
1966
1967 /* could not discard */
1968 return 0;
1969}
1970
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001971static int
1972rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1973 u64 *ts, u64 *delta)
1974{
1975 struct ring_buffer_event *event;
1976 static int once;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001977 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001978
1979 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1980 printk(KERN_WARNING "Delta way too big! %llu"
1981 " ts=%llu write stamp = %llu\n",
Stephen Rothwelle2862c92008-10-27 17:43:28 +11001982 (unsigned long long)*delta,
1983 (unsigned long long)*ts,
1984 (unsigned long long)cpu_buffer->write_stamp);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001985 WARN_ON(1);
1986 }
1987
1988 /*
1989 * The delta is too big, we need to add a
1990 * new timestamp.
1991 */
1992 event = __rb_reserve_next(cpu_buffer,
1993 RINGBUF_TYPE_TIME_EXTEND,
1994 RB_LEN_TIME_EXTEND,
1995 ts);
1996 if (!event)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001997 return -EBUSY;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001998
Steven Rostedtbf41a152008-10-04 02:00:59 -04001999 if (PTR_ERR(event) == -EAGAIN)
2000 return -EAGAIN;
2001
2002 /* Only a committed time event can update the write stamp */
Steven Rostedtfa743952009-06-16 12:37:57 -04002003 if (rb_event_is_commit(cpu_buffer, event)) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04002004 /*
Steven Rostedtfa743952009-06-16 12:37:57 -04002005 * If this is the first event on the page, then it was
2006 * updated with the page itself. Try to discard it,
2007 * and if we can't, just make it zero.
Steven Rostedtbf41a152008-10-04 02:00:59 -04002008 */
2009 if (rb_event_index(event)) {
2010 event->time_delta = *delta & TS_MASK;
2011 event->array[0] = *delta >> TS_SHIFT;
2012 } else {
Steven Rostedtea05b572009-06-03 09:30:10 -04002013 /* try to discard, since we do not need this */
2014 if (!rb_try_to_discard(cpu_buffer, event)) {
2015 /* nope, just zero it */
2016 event->time_delta = 0;
2017 event->array[0] = 0;
2018 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04002019 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002020 cpu_buffer->write_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002021 /* let the caller know this was the commit */
2022 ret = 1;
2023 } else {
Steven Rostedtedd813b2009-06-02 23:00:53 -04002024 /* Try to discard the event */
2025 if (!rb_try_to_discard(cpu_buffer, event)) {
2026 /* Darn, this is just wasted space */
2027 event->time_delta = 0;
2028 event->array[0] = 0;
Steven Rostedtedd813b2009-06-02 23:00:53 -04002029 }
Steven Rostedtf57a8a12009-06-05 14:11:30 -04002030 ret = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002031 }
2032
Steven Rostedtbf41a152008-10-04 02:00:59 -04002033 *delta = 0;
2034
2035 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002036}
2037
Steven Rostedtfa743952009-06-16 12:37:57 -04002038static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2039{
2040 local_inc(&cpu_buffer->committing);
2041 local_inc(&cpu_buffer->commits);
2042}
2043
2044static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2045{
2046 unsigned long commits;
2047
2048 if (RB_WARN_ON(cpu_buffer,
2049 !local_read(&cpu_buffer->committing)))
2050 return;
2051
2052 again:
2053 commits = local_read(&cpu_buffer->commits);
2054 /* synchronize with interrupts */
2055 barrier();
2056 if (local_read(&cpu_buffer->committing) == 1)
2057 rb_set_commit_to_write(cpu_buffer);
2058
2059 local_dec(&cpu_buffer->committing);
2060
2061 /* synchronize with interrupts */
2062 barrier();
2063
2064 /*
2065 * Need to account for interrupts coming in between the
2066 * updating of the commit page and the clearing of the
2067 * committing counter.
2068 */
2069 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2070 !local_read(&cpu_buffer->committing)) {
2071 local_inc(&cpu_buffer->committing);
2072 goto again;
2073 }
2074}
2075
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002076static struct ring_buffer_event *
Steven Rostedt62f0b3e2009-09-04 14:11:34 -04002077rb_reserve_next_event(struct ring_buffer *buffer,
2078 struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt1cd8d732009-05-11 14:08:09 -04002079 unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002080{
2081 struct ring_buffer_event *event;
Steven Rostedt168b6b12009-05-11 22:11:05 -04002082 u64 ts, delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002083 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002084 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002085
Steven Rostedtfa743952009-06-16 12:37:57 -04002086 rb_start_commit(cpu_buffer);
2087
Steven Rostedt85bac322009-09-04 14:24:40 -04002088#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Steven Rostedt62f0b3e2009-09-04 14:11:34 -04002089 /*
2090 * Due to the ability to swap a cpu buffer from a buffer
2091 * it is possible it was swapped before we committed.
2092 * (committing stops a swap). We check for it here and
2093 * if it happened, we have to fail the write.
2094 */
2095 barrier();
2096 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2097 local_dec(&cpu_buffer->committing);
2098 local_dec(&cpu_buffer->commits);
2099 return NULL;
2100 }
Steven Rostedt85bac322009-09-04 14:24:40 -04002101#endif
Steven Rostedt62f0b3e2009-09-04 14:11:34 -04002102
Steven Rostedtbe957c42009-05-11 14:42:53 -04002103 length = rb_calculate_event_length(length);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002104 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002105 /*
2106 * We allow for interrupts to reenter here and do a trace.
2107 * If one does, it will cause this original code to loop
2108 * back here. Even with heavy interrupts happening, this
2109 * should only happen a few times in a row. If this happens
2110 * 1000 times in a row, there must be either an interrupt
2111 * storm or we have something buggy.
2112 * Bail!
2113 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002114 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedtfa743952009-06-16 12:37:57 -04002115 goto out_fail;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002116
Steven Rostedt88eb0122009-05-11 16:28:23 -04002117 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002118
Steven Rostedtbf41a152008-10-04 02:00:59 -04002119 /*
2120 * Only the first commit can update the timestamp.
2121 * Yes there is a race here. If an interrupt comes in
2122 * just after the conditional and it traces too, then it
2123 * will also check the deltas. More than one timestamp may
2124 * also be made. But only the entry that did the actual
2125 * commit will be something other than zero.
2126 */
Steven Rostedt0f0c85f2009-05-11 16:08:00 -04002127 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
2128 rb_page_write(cpu_buffer->tail_page) ==
2129 rb_commit_index(cpu_buffer))) {
Steven Rostedt168b6b12009-05-11 22:11:05 -04002130 u64 diff;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002131
Steven Rostedt168b6b12009-05-11 22:11:05 -04002132 diff = ts - cpu_buffer->write_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002133
Steven Rostedt168b6b12009-05-11 22:11:05 -04002134 /* make sure this diff is calculated here */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002135 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002136
Steven Rostedtbf41a152008-10-04 02:00:59 -04002137 /* Did the write stamp get updated already? */
2138 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt168b6b12009-05-11 22:11:05 -04002139 goto get_event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002140
Steven Rostedt168b6b12009-05-11 22:11:05 -04002141 delta = diff;
2142 if (unlikely(test_time_stamp(delta))) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04002143
2144 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002145 if (commit == -EBUSY)
Steven Rostedtfa743952009-06-16 12:37:57 -04002146 goto out_fail;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002147
2148 if (commit == -EAGAIN)
2149 goto again;
2150
2151 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002152 }
Steven Rostedt168b6b12009-05-11 22:11:05 -04002153 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002154
Steven Rostedt168b6b12009-05-11 22:11:05 -04002155 get_event:
Steven Rostedt1cd8d732009-05-11 14:08:09 -04002156 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
Steven Rostedt168b6b12009-05-11 22:11:05 -04002157 if (unlikely(PTR_ERR(event) == -EAGAIN))
Steven Rostedtbf41a152008-10-04 02:00:59 -04002158 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002159
Steven Rostedtfa743952009-06-16 12:37:57 -04002160 if (!event)
2161 goto out_fail;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002162
Steven Rostedtfa743952009-06-16 12:37:57 -04002163 if (!rb_event_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002164 delta = 0;
2165
2166 event->time_delta = delta;
2167
2168 return event;
Steven Rostedtfa743952009-06-16 12:37:57 -04002169
2170 out_fail:
2171 rb_end_commit(cpu_buffer);
2172 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002173}
2174
Paul Mundt1155de42009-06-25 14:30:12 +09002175#ifdef CONFIG_TRACING
2176
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002177#define TRACE_RECURSIVE_DEPTH 16
Steven Rostedt261842b2009-04-16 21:41:52 -04002178
2179static int trace_recursive_lock(void)
2180{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002181 current->trace_recursion++;
Steven Rostedt261842b2009-04-16 21:41:52 -04002182
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002183 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
2184 return 0;
Steven Rostedt261842b2009-04-16 21:41:52 -04002185
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002186 /* Disable all tracing before we do anything else */
2187 tracing_off_permanent();
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02002188
Steven Rostedt7d7d2b82009-04-27 12:37:49 -04002189 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002190 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2191 current->trace_recursion,
2192 hardirq_count() >> HARDIRQ_SHIFT,
2193 softirq_count() >> SOFTIRQ_SHIFT,
2194 in_nmi());
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02002195
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002196 WARN_ON_ONCE(1);
2197 return -1;
Steven Rostedt261842b2009-04-16 21:41:52 -04002198}
2199
2200static void trace_recursive_unlock(void)
2201{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002202 WARN_ON_ONCE(!current->trace_recursion);
Steven Rostedt261842b2009-04-16 21:41:52 -04002203
Steven Rostedtaa18efb2009-04-20 16:16:11 -04002204 current->trace_recursion--;
Steven Rostedt261842b2009-04-16 21:41:52 -04002205}
2206
Paul Mundt1155de42009-06-25 14:30:12 +09002207#else
2208
2209#define trace_recursive_lock() (0)
2210#define trace_recursive_unlock() do { } while (0)
2211
2212#endif
2213
Steven Rostedtbf41a152008-10-04 02:00:59 -04002214static DEFINE_PER_CPU(int, rb_need_resched);
2215
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002216/**
2217 * ring_buffer_lock_reserve - reserve a part of the buffer
2218 * @buffer: the ring buffer to reserve from
2219 * @length: the length of the data to reserve (excluding event header)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002220 *
2221 * Returns a reserved event on the ring buffer to copy directly to.
2222 * The user of this interface will need to get the body to write into
2223 * and can use the ring_buffer_event_data() interface.
2224 *
2225 * The length is the length of the data needed, not the event length
2226 * which also includes the event header.
2227 *
2228 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2229 * If NULL is returned, then nothing has been allocated or locked.
2230 */
2231struct ring_buffer_event *
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02002232ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002233{
2234 struct ring_buffer_per_cpu *cpu_buffer;
2235 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002236 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002237
Steven Rostedt033601a2008-11-21 12:41:55 -05002238 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05002239 return NULL;
2240
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002241 if (atomic_read(&buffer->record_disabled))
2242 return NULL;
2243
Steven Rostedtbf41a152008-10-04 02:00:59 -04002244 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05002245 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04002246
Steven Rostedt261842b2009-04-16 21:41:52 -04002247 if (trace_recursive_lock())
2248 goto out_nocheck;
2249
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002250 cpu = raw_smp_processor_id();
2251
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302252 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04002253 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002254
2255 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002256
2257 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04002258 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002259
Steven Rostedtbe957c42009-05-11 14:42:53 -04002260 if (length > BUF_MAX_DATA_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04002261 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002262
Steven Rostedt62f0b3e2009-09-04 14:11:34 -04002263 event = rb_reserve_next_event(buffer, cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002264 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04002265 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002266
Steven Rostedtbf41a152008-10-04 02:00:59 -04002267 /*
2268 * Need to store resched state on this cpu.
2269 * Only the first needs to.
2270 */
2271
2272 if (preempt_count() == 1)
2273 per_cpu(rb_need_resched, cpu) = resched;
2274
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002275 return event;
2276
Steven Rostedtd7690412008-10-01 00:29:53 -04002277 out:
Steven Rostedt261842b2009-04-16 21:41:52 -04002278 trace_recursive_unlock();
2279
2280 out_nocheck:
Steven Rostedt182e9f52008-11-03 23:15:56 -05002281 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002282 return NULL;
2283}
Robert Richterc4f50182008-12-11 16:49:22 +01002284EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002285
Steven Rostedta1863c22009-09-03 10:23:58 -04002286static void
2287rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002288 struct ring_buffer_event *event)
2289{
Steven Rostedtfa743952009-06-16 12:37:57 -04002290 /*
2291 * The event first in the commit queue updates the
2292 * time stamp.
2293 */
2294 if (rb_event_is_commit(cpu_buffer, event))
2295 cpu_buffer->write_stamp += event->time_delta;
Steven Rostedta1863c22009-09-03 10:23:58 -04002296}
Steven Rostedtbf41a152008-10-04 02:00:59 -04002297
Steven Rostedta1863c22009-09-03 10:23:58 -04002298static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2299 struct ring_buffer_event *event)
2300{
2301 local_inc(&cpu_buffer->entries);
2302 rb_update_write_stamp(cpu_buffer, event);
Steven Rostedtfa743952009-06-16 12:37:57 -04002303 rb_end_commit(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002304}
2305
2306/**
2307 * ring_buffer_unlock_commit - commit a reserved event
2308 * @buffer: The buffer to commit to
2309 * @event: The event pointer to commit.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002310 *
2311 * This commits the data to the ring buffer, and releases any locks held.
2312 *
2313 * Must be paired with ring_buffer_lock_reserve.
2314 */
2315int ring_buffer_unlock_commit(struct ring_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02002316 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002317{
2318 struct ring_buffer_per_cpu *cpu_buffer;
2319 int cpu = raw_smp_processor_id();
2320
2321 cpu_buffer = buffer->buffers[cpu];
2322
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002323 rb_commit(cpu_buffer, event);
2324
Steven Rostedt261842b2009-04-16 21:41:52 -04002325 trace_recursive_unlock();
2326
Steven Rostedtbf41a152008-10-04 02:00:59 -04002327 /*
2328 * Only the last preempt count needs to restore preemption.
2329 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05002330 if (preempt_count() == 1)
2331 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2332 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04002333 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002334
2335 return 0;
2336}
Robert Richterc4f50182008-12-11 16:49:22 +01002337EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
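/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): the reserve/commit pairing that the kernel-doc for
 * ring_buffer_lock_reserve() above describes.  The example_record
 * struct and example_write() are hypothetical names.
 */
#if 0
struct example_record {
        unsigned long   ip;
        unsigned long   parent_ip;
};

static int example_write(struct ring_buffer *buffer,
                         unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct example_record *entry;

        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return -EBUSY;  /* recording disabled or buffer full */

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        return ring_buffer_unlock_commit(buffer, event);
}
#endif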
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002338
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02002339static inline void rb_event_discard(struct ring_buffer_event *event)
2340{
Lai Jiangshan334d4162009-04-24 11:27:05 +08002341 /* array[0] holds the actual length for the discarded event */
2342 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2343 event->type_len = RINGBUF_TYPE_PADDING;
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02002344 /* time delta must be non zero */
2345 if (!event->time_delta)
2346 event->time_delta = 1;
2347}
2348
Steven Rostedta1863c22009-09-03 10:23:58 -04002349/*
2350 * Decrement the entries to the page that an event is on.
2351 * The event does not even need to exist, only the pointer
2352 * to the page it is on. This may only be called before the commit
2353 * takes place.
2354 */
2355static inline void
2356rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2357 struct ring_buffer_event *event)
2358{
2359 unsigned long addr = (unsigned long)event;
2360 struct buffer_page *bpage = cpu_buffer->commit_page;
2361 struct buffer_page *start;
2362
2363 addr &= PAGE_MASK;
2364
2365 /* Do the likely case first */
2366 if (likely(bpage->page == (void *)addr)) {
2367 local_dec(&bpage->entries);
2368 return;
2369 }
2370
2371 /*
2372 * Because the commit page may be on the reader page we
2373 * start with the next page and check the end loop there.
2374 */
2375 rb_inc_page(cpu_buffer, &bpage);
2376 start = bpage;
2377 do {
2378 if (bpage->page == (void *)addr) {
2379 local_dec(&bpage->entries);
2380 return;
2381 }
2382 rb_inc_page(cpu_buffer, &bpage);
2383 } while (bpage != start);
2384
2385 /* commit not part of this buffer?? */
2386 RB_WARN_ON(cpu_buffer, 1);
2387}
2388
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002389/**
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002390 * ring_buffer_commit_discard - discard an event that has not been committed
2391 * @buffer: the ring buffer
2392 * @event: non committed event to discard
2393 *
Steven Rostedtdc892f72009-09-03 15:33:41 -04002394 * Sometimes an event that is in the ring buffer needs to be ignored.
2395 * This function lets the user discard an event in the ring buffer
2396 * so that it will not be read later.
2397 *
2398 * This function only works if it is called before the item has been
2399 * committed. It will try to free the event from the ring buffer
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002400 * if another event has not been added behind it.
2401 *
2402 * If another event has been added behind it, it will set the event
2403 * up as discarded, and perform the commit.
2404 *
2405 * If this function is called, do not call ring_buffer_unlock_commit on
2406 * the event.
2407 */
2408void ring_buffer_discard_commit(struct ring_buffer *buffer,
2409 struct ring_buffer_event *event)
2410{
2411 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002412 int cpu;
2413
2414 /* The event is discarded regardless */
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02002415 rb_event_discard(event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002416
Steven Rostedtfa743952009-06-16 12:37:57 -04002417 cpu = smp_processor_id();
2418 cpu_buffer = buffer->buffers[cpu];
2419
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002420 /*
2421 * This must only be called if the event has not been
2422 * committed yet. Thus we can assume that preemption
2423 * is still disabled.
2424 */
Steven Rostedtfa743952009-06-16 12:37:57 -04002425 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002426
Steven Rostedta1863c22009-09-03 10:23:58 -04002427 rb_decrement_entry(cpu_buffer, event);
Steven Rostedt0f2541d2009-08-05 12:02:48 -04002428 if (rb_try_to_discard(cpu_buffer, event))
Steven Rostedtedd813b2009-06-02 23:00:53 -04002429 goto out;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002430
2431 /*
2432 * The commit is still visible by the reader, so we
Steven Rostedta1863c22009-09-03 10:23:58 -04002433 * must still update the timestamp.
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002434 */
Steven Rostedta1863c22009-09-03 10:23:58 -04002435 rb_update_write_stamp(cpu_buffer, event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002436 out:
Steven Rostedtfa743952009-06-16 12:37:57 -04002437 rb_end_commit(cpu_buffer);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002438
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02002439 trace_recursive_unlock();
2440
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04002441 /*
2442 * Only the last preempt count needs to restore preemption.
2443 */
2444 if (preempt_count() == 1)
2445 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
2446 else
2447 preempt_enable_no_resched_notrace();
2448
2449}
2450EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
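/*
 * Illustrative sketch (hypothetical helper, not from the original
 * source, kept out of the build): reserving an event and then throwing
 * it away with ring_buffer_discard_commit() instead of committing it,
 * as the kernel-doc above describes.
 */
#if 0
static void example_write_or_drop(struct ring_buffer *buffer, int value)
{
        struct ring_buffer_event *event;
        int *entry;

        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        *entry = value;

        if (value < 0)
                /* changed our mind: do not call unlock_commit after this */
                ring_buffer_discard_commit(buffer, event);
        else
                ring_buffer_unlock_commit(buffer, event);
}
#endif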
2451
2452/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002453 * ring_buffer_write - write data to the buffer without reserving
2454 * @buffer: The ring buffer to write to.
2455 * @length: The length of the data being written (excluding the event header)
2456 * @data: The data to write to the buffer.
2457 *
2458 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2459 * one function. If you already have the data to write to the buffer, it
2460 * may be easier to simply call this function.
2461 *
2462 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2463 * and not the length of the event which would hold the header.
2464 */
2465int ring_buffer_write(struct ring_buffer *buffer,
2466 unsigned long length,
2467 void *data)
2468{
2469 struct ring_buffer_per_cpu *cpu_buffer;
2470 struct ring_buffer_event *event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002471 void *body;
2472 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002473 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002474
Steven Rostedt033601a2008-11-21 12:41:55 -05002475 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05002476 return -EBUSY;
2477
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002478 if (atomic_read(&buffer->record_disabled))
2479 return -EBUSY;
2480
Steven Rostedt182e9f52008-11-03 23:15:56 -05002481 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04002482
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002483 cpu = raw_smp_processor_id();
2484
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302485 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04002486 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002487
2488 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002489
2490 if (atomic_read(&cpu_buffer->record_disabled))
2491 goto out;
2492
Steven Rostedtbe957c42009-05-11 14:42:53 -04002493 if (length > BUF_MAX_DATA_SIZE)
2494 goto out;
2495
Steven Rostedt62f0b3e2009-09-04 14:11:34 -04002496 event = rb_reserve_next_event(buffer, cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002497 if (!event)
2498 goto out;
2499
2500 body = rb_event_data(event);
2501
2502 memcpy(body, data, length);
2503
2504 rb_commit(cpu_buffer, event);
2505
2506 ret = 0;
2507 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05002508 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002509
2510 return ret;
2511}
Robert Richterc4f50182008-12-11 16:49:22 +01002512EXPORT_SYMBOL_GPL(ring_buffer_write);
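/*
 * Illustrative sketch (hypothetical caller, kept out of the build):
 * when the data is already assembled, ring_buffer_write() replaces the
 * reserve/commit pair shown earlier with a single call.
 */
#if 0
static int example_write_blob(struct ring_buffer *buffer,
                              const void *blob, unsigned long len)
{
        /* len is the data length only; the event header is added inside */
        return ring_buffer_write(buffer, len, (void *)blob);
}
#endif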
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002513
Andrew Morton34a148b2009-01-09 12:27:09 -08002514static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedtbf41a152008-10-04 02:00:59 -04002515{
2516 struct buffer_page *reader = cpu_buffer->reader_page;
Steven Rostedt77ae3652009-03-27 11:00:29 -04002517 struct buffer_page *head = rb_set_head_page(cpu_buffer);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002518 struct buffer_page *commit = cpu_buffer->commit_page;
2519
Steven Rostedt77ae3652009-03-27 11:00:29 -04002520 /* In case of error, head will be NULL */
2521 if (unlikely(!head))
2522 return 1;
2523
Steven Rostedtbf41a152008-10-04 02:00:59 -04002524 return reader->read == rb_page_commit(reader) &&
2525 (commit == reader ||
2526 (commit == head &&
2527 head->read == rb_page_commit(commit)));
2528}
2529
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002530/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002531 * ring_buffer_record_disable - stop all writes into the buffer
2532 * @buffer: The ring buffer to stop writes to.
2533 *
2534 * This prevents all writes to the buffer. Any attempt to write
2535 * to the buffer after this will fail and return NULL.
2536 *
2537 * The caller should call synchronize_sched() after this.
2538 */
2539void ring_buffer_record_disable(struct ring_buffer *buffer)
2540{
2541 atomic_inc(&buffer->record_disabled);
2542}
Robert Richterc4f50182008-12-11 16:49:22 +01002543EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002544
2545/**
2546 * ring_buffer_record_enable - enable writes to the buffer
2547 * @buffer: The ring buffer to enable writes
2548 *
2549 * Note, multiple disables will need the same number of enables
2550 * to truly enable the writing (much like preempt_disable).
2551 */
2552void ring_buffer_record_enable(struct ring_buffer *buffer)
2553{
2554 atomic_dec(&buffer->record_disabled);
2555}
Robert Richterc4f50182008-12-11 16:49:22 +01002556EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
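/*
 * Illustrative sketch (hypothetical helper, kept out of the build):
 * pausing all writers before inspecting the buffer, following the
 * synchronize_sched() advice in the kernel-doc above.
 */
#if 0
static void example_with_writers_stopped(struct ring_buffer *buffer)
{
        ring_buffer_record_disable(buffer);
        /* wait for writers already inside the buffer to finish */
        synchronize_sched();

        /* ... inspect or drain the buffer here ... */

        ring_buffer_record_enable(buffer);
}
#endif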
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002557
2558/**
2559 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2560 * @buffer: The ring buffer to stop writes to.
2561 * @cpu: The CPU buffer to stop
2562 *
2563 * This prevents all writes to the buffer. Any attempt to write
2564 * to the buffer after this will fail and return NULL.
2565 *
2566 * The caller should call synchronize_sched() after this.
2567 */
2568void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2569{
2570 struct ring_buffer_per_cpu *cpu_buffer;
2571
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302572 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002573 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002574
2575 cpu_buffer = buffer->buffers[cpu];
2576 atomic_inc(&cpu_buffer->record_disabled);
2577}
Robert Richterc4f50182008-12-11 16:49:22 +01002578EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002579
2580/**
2581 * ring_buffer_record_enable_cpu - enable writes to the buffer
2582 * @buffer: The ring buffer to enable writes
2583 * @cpu: The CPU to enable.
2584 *
2585 * Note, multiple disables will need the same number of enables
2586 * to truly enable the writing (much like preempt_disable).
2587 */
2588void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2589{
2590 struct ring_buffer_per_cpu *cpu_buffer;
2591
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302592 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002593 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002594
2595 cpu_buffer = buffer->buffers[cpu];
2596 atomic_dec(&cpu_buffer->record_disabled);
2597}
Robert Richterc4f50182008-12-11 16:49:22 +01002598EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002599
2600/**
2601 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2602 * @buffer: The ring buffer
2603 * @cpu: The per CPU buffer to get the entries from.
2604 */
2605unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2606{
2607 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002608 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002609
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302610 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002611 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002612
2613 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt77ae3652009-03-27 11:00:29 -04002614 ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
Steven Rostedte4906ef2009-04-30 20:49:44 -04002615 - cpu_buffer->read;
Steven Rostedt554f7862009-03-11 22:00:13 -04002616
2617 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002618}
Robert Richterc4f50182008-12-11 16:49:22 +01002619EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002620
2621/**
2622 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2623 * @buffer: The ring buffer
2624 * @cpu: The per CPU buffer to get the number of overruns from
2625 */
2626unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2627{
2628 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002629 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002630
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302631 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002632 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002633
2634 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt77ae3652009-03-27 11:00:29 -04002635 ret = local_read(&cpu_buffer->overrun);
Steven Rostedt554f7862009-03-11 22:00:13 -04002636
2637 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002638}
Robert Richterc4f50182008-12-11 16:49:22 +01002639EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002640
2641/**
Steven Rostedtf0d2c682009-04-29 13:43:37 -04002642 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2643 * @buffer: The ring buffer
2644 * @cpu: The per CPU buffer to get the number of overruns from
2645 */
2646unsigned long
2647ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2648{
2649 struct ring_buffer_per_cpu *cpu_buffer;
2650 unsigned long ret;
2651
2652 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2653 return 0;
2654
2655 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt77ae3652009-03-27 11:00:29 -04002656 ret = local_read(&cpu_buffer->commit_overrun);
Steven Rostedtf0d2c682009-04-29 13:43:37 -04002657
2658 return ret;
2659}
2660EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2661
2662/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002663 * ring_buffer_entries - get the number of entries in a buffer
2664 * @buffer: The ring buffer
2665 *
2666 * Returns the total number of entries in the ring buffer
2667 * (all CPU entries)
2668 */
2669unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2670{
2671 struct ring_buffer_per_cpu *cpu_buffer;
2672 unsigned long entries = 0;
2673 int cpu;
2674
2675 /* if you care about this being correct, lock the buffer */
2676 for_each_buffer_cpu(buffer, cpu) {
2677 cpu_buffer = buffer->buffers[cpu];
Steven Rostedte4906ef2009-04-30 20:49:44 -04002678 entries += (local_read(&cpu_buffer->entries) -
Steven Rostedt77ae3652009-03-27 11:00:29 -04002679 local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002680 }
2681
2682 return entries;
2683}
Robert Richterc4f50182008-12-11 16:49:22 +01002684EXPORT_SYMBOL_GPL(ring_buffer_entries);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002685
2686/**
2687 * ring_buffer_overruns - get the number of overruns in the buffer
2688 * @buffer: The ring buffer
2689 *
2690 * Returns the total number of overruns in the ring buffer
2691 * (all CPU entries)
2692 */
2693unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2694{
2695 struct ring_buffer_per_cpu *cpu_buffer;
2696 unsigned long overruns = 0;
2697 int cpu;
2698
2699 /* if you care about this being correct, lock the buffer */
2700 for_each_buffer_cpu(buffer, cpu) {
2701 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt77ae3652009-03-27 11:00:29 -04002702 overruns += local_read(&cpu_buffer->overrun);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002703 }
2704
2705 return overruns;
2706}
Robert Richterc4f50182008-12-11 16:49:22 +01002707EXPORT_SYMBOL_GPL(ring_buffer_overruns);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002708
Steven Rostedt642edba2008-11-12 00:01:26 -05002709static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002710{
2711 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2712
Steven Rostedtd7690412008-10-01 00:29:53 -04002713 /* Iterator usage is expected to have record disabled */
2714 if (list_empty(&cpu_buffer->reader_page->list)) {
Steven Rostedt77ae3652009-03-27 11:00:29 -04002715 iter->head_page = rb_set_head_page(cpu_buffer);
2716 if (unlikely(!iter->head_page))
2717 return;
2718 iter->head = iter->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002719 } else {
2720 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002721 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002722 }
2723 if (iter->head)
2724 iter->read_stamp = cpu_buffer->read_stamp;
2725 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05002726 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05002727}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002728
Steven Rostedt642edba2008-11-12 00:01:26 -05002729/**
2730 * ring_buffer_iter_reset - reset an iterator
2731 * @iter: The iterator to reset
2732 *
2733 * Resets the iterator, so that it will start from the beginning
2734 * again.
2735 */
2736void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2737{
Steven Rostedt554f7862009-03-11 22:00:13 -04002738 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt642edba2008-11-12 00:01:26 -05002739 unsigned long flags;
2740
Steven Rostedt554f7862009-03-11 22:00:13 -04002741 if (!iter)
2742 return;
2743
2744 cpu_buffer = iter->cpu_buffer;
2745
Steven Rostedt642edba2008-11-12 00:01:26 -05002746 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2747 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002748 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002749}
Robert Richterc4f50182008-12-11 16:49:22 +01002750EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002751
2752/**
2753 * ring_buffer_iter_empty - check if an iterator has no more to read
2754 * @iter: The iterator to check
2755 */
2756int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2757{
2758 struct ring_buffer_per_cpu *cpu_buffer;
2759
2760 cpu_buffer = iter->cpu_buffer;
2761
Steven Rostedtbf41a152008-10-04 02:00:59 -04002762 return iter->head_page == cpu_buffer->commit_page &&
2763 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002764}
Robert Richterc4f50182008-12-11 16:49:22 +01002765EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002766
2767static void
2768rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2769 struct ring_buffer_event *event)
2770{
2771 u64 delta;
2772
Lai Jiangshan334d4162009-04-24 11:27:05 +08002773 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002774 case RINGBUF_TYPE_PADDING:
2775 return;
2776
2777 case RINGBUF_TYPE_TIME_EXTEND:
2778 delta = event->array[0];
2779 delta <<= TS_SHIFT;
2780 delta += event->time_delta;
2781 cpu_buffer->read_stamp += delta;
2782 return;
2783
2784 case RINGBUF_TYPE_TIME_STAMP:
2785 /* FIXME: not implemented */
2786 return;
2787
2788 case RINGBUF_TYPE_DATA:
2789 cpu_buffer->read_stamp += event->time_delta;
2790 return;
2791
2792 default:
2793 BUG();
2794 }
2795 return;
2796}
2797
2798static void
2799rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2800 struct ring_buffer_event *event)
2801{
2802 u64 delta;
2803
Lai Jiangshan334d4162009-04-24 11:27:05 +08002804 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002805 case RINGBUF_TYPE_PADDING:
2806 return;
2807
2808 case RINGBUF_TYPE_TIME_EXTEND:
2809 delta = event->array[0];
2810 delta <<= TS_SHIFT;
2811 delta += event->time_delta;
2812 iter->read_stamp += delta;
2813 return;
2814
2815 case RINGBUF_TYPE_TIME_STAMP:
2816 /* FIXME: not implemented */
2817 return;
2818
2819 case RINGBUF_TYPE_DATA:
2820 iter->read_stamp += event->time_delta;
2821 return;
2822
2823 default:
2824 BUG();
2825 }
2826 return;
2827}
2828
Steven Rostedtd7690412008-10-01 00:29:53 -04002829static struct buffer_page *
2830rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002831{
Steven Rostedtd7690412008-10-01 00:29:53 -04002832 struct buffer_page *reader = NULL;
2833 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002834 int nr_loops = 0;
Steven Rostedt77ae3652009-03-27 11:00:29 -04002835 int ret;
Steven Rostedtd7690412008-10-01 00:29:53 -04002836
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002837 local_irq_save(flags);
2838 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04002839
2840 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002841 /*
2842 * This should normally only loop twice. But because the
2843 * start of the reader inserts an empty page, it causes
2844 * a case where we will loop three times. There should be no
2845 * reason to loop four times (that I know of).
2846 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002847 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002848 reader = NULL;
2849 goto out;
2850 }
2851
Steven Rostedtd7690412008-10-01 00:29:53 -04002852 reader = cpu_buffer->reader_page;
2853
2854 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002855 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04002856 goto out;
2857
2858 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002859 if (RB_WARN_ON(cpu_buffer,
2860 cpu_buffer->reader_page->read > rb_page_size(reader)))
2861 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04002862
2863 /* check if we caught up to the tail */
2864 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002865 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04002866 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002867
2868 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04002869 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002870 */
Steven Rostedt77ae3652009-03-27 11:00:29 -04002871 local_set(&cpu_buffer->reader_page->write, 0);
2872 local_set(&cpu_buffer->reader_page->entries, 0);
2873 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002874
Steven Rostedt77ae3652009-03-27 11:00:29 -04002875 spin:
2876 /*
2877 * Splice the empty reader page into the list around the head.
2878 */
2879 reader = rb_set_head_page(cpu_buffer);
Steven Rostedtd7690412008-10-01 00:29:53 -04002880 cpu_buffer->reader_page->list.next = reader->list.next;
2881 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002882
Steven Rostedt3adc54f2009-03-30 15:32:01 -04002883 /*
2884 * cpu_buffer->pages just needs to point to the buffer, it
2885 * has no specific buffer page to point to. Let's move it out
2886 * of our way so we don't accidentally swap it.
2887 */
2888 cpu_buffer->pages = reader->list.prev;
2889
Steven Rostedt77ae3652009-03-27 11:00:29 -04002890 /* The reader page will be pointing to the new head */
2891 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
Steven Rostedtd7690412008-10-01 00:29:53 -04002892
2893 /*
Steven Rostedt77ae3652009-03-27 11:00:29 -04002894 * Here's the tricky part.
2895 *
2896 * We need to move the pointer past the head page.
2897 * But we can only do that if a writer is not currently
2898 * moving it. The page before the head page has the
2899 * flag bit '1' set if it is pointing to the page we want,
2900 * but if the writer is in the process of moving it
2901 * then it will be '2', or '0' if it has already moved.
Steven Rostedtd7690412008-10-01 00:29:53 -04002902 */
Steven Rostedtd7690412008-10-01 00:29:53 -04002903
Steven Rostedt77ae3652009-03-27 11:00:29 -04002904 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2905
2906 /*
2907 * If we did not convert it, then we must try again.
2908 */
2909 if (!ret)
2910 goto spin;
2911
2912 /*
2913 * Yeah! We succeeded in replacing the page.
2914 *
2915 * Now make the new head point back to the reader page.
2916 */
2917 reader->list.next->prev = &cpu_buffer->reader_page->list;
2918 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
Steven Rostedtd7690412008-10-01 00:29:53 -04002919
2920 /* Finally update the reader page to the new head */
2921 cpu_buffer->reader_page = reader;
2922 rb_reset_reader_page(cpu_buffer);
2923
2924 goto again;
2925
2926 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002927 __raw_spin_unlock(&cpu_buffer->lock);
2928 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04002929
2930 return reader;
2931}
2932
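/*
 * Conceptually, the handoff above comes down to a single cmpxchg on the
 * ->next pointer of the page that precedes the head page.  A rough sketch,
 * using the raw flag values from the comment above (1 == head, 2 == writer
 * updating, 0 == normal link):
 *
 *	ptr = (unsigned long *)&head->list.prev->next;
 *	old = (*ptr & ~3UL) | 1UL;              /* expect the "head" flag */
 *	new = (unsigned long)&reader->list;     /* splice in reader page  */
 *	ok  = (cmpxchg(ptr, old, new) == old);
 *
 * If a writer changed the flag in the meantime the cmpxchg fails and the
 * code above jumps back to "spin:" to pick up the new head page.
 */
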
2933static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2934{
2935 struct ring_buffer_event *event;
2936 struct buffer_page *reader;
2937 unsigned length;
2938
2939 reader = rb_get_reader_page(cpu_buffer);
2940
2941 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002942 if (RB_WARN_ON(cpu_buffer, !reader))
2943 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002944
2945 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002946
Steven Rostedta1863c22009-09-03 10:23:58 -04002947 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
Steven Rostedte4906ef2009-04-30 20:49:44 -04002948 cpu_buffer->read++;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002949
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002950 rb_update_read_stamp(cpu_buffer, event);
2951
Steven Rostedtd7690412008-10-01 00:29:53 -04002952 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002953 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002954}
2955
2956static void rb_advance_iter(struct ring_buffer_iter *iter)
2957{
2958 struct ring_buffer *buffer;
2959 struct ring_buffer_per_cpu *cpu_buffer;
2960 struct ring_buffer_event *event;
2961 unsigned length;
2962
2963 cpu_buffer = iter->cpu_buffer;
2964 buffer = cpu_buffer->buffer;
2965
2966 /*
2967 * Check if we are at the end of the buffer.
2968 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002969 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedtea05b572009-06-03 09:30:10 -04002970 /* discarded commits can make the page empty */
2971 if (iter->head_page == cpu_buffer->commit_page)
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002972 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002973 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002974 return;
2975 }
2976
2977 event = rb_iter_head_event(iter);
2978
2979 length = rb_event_length(event);
2980
2981 /*
2982 * This should not be called to advance the header if we are
2983 * at the tail of the buffer.
2984 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002985 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05002986 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002987 (iter->head + length > rb_commit_index(cpu_buffer))))
2988 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002989
2990 rb_update_iter_read_stamp(iter, event);
2991
2992 iter->head += length;
2993
2994 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002995 if ((iter->head >= rb_page_size(iter->head_page)) &&
2996 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002997 rb_advance_iter(iter);
2998}
2999
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003000static struct ring_buffer_event *
Robert Richterd8eeb2d2009-07-31 14:58:04 +02003001rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003002{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003003 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04003004 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003005 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003006
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003007 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003008 /*
3009 * We repeat when a timestamp is encountered. It is possible
3010 * to get multiple timestamps from an interrupt entering just
Steven Rostedtea05b572009-06-03 09:30:10 -04003011 * as one timestamp is about to be written, or from discarded
3012 * commits. The most that we can have is the number of timestamps that fit on a single page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003013 */
Steven Rostedtea05b572009-06-03 09:30:10 -04003014 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003015 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003016
Steven Rostedtd7690412008-10-01 00:29:53 -04003017 reader = rb_get_reader_page(cpu_buffer);
3018 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003019 return NULL;
3020
Steven Rostedtd7690412008-10-01 00:29:53 -04003021 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003022
Lai Jiangshan334d4162009-04-24 11:27:05 +08003023 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003024 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05003025 if (rb_null_event(event))
3026 RB_WARN_ON(cpu_buffer, 1);
3027 /*
3028 * Because the writer could be discarding every
3029 * event it creates (which would probably be bad)
3030 * if we were to go back to "again" then we may never
3031 * catch up, and will trigger the warn on, or lock
3032 * the box. Return the padding, and we will release
3033 * the current locks, and try again.
3034 */
Tom Zanussi2d622712009-03-22 03:30:49 -05003035 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003036
3037 case RINGBUF_TYPE_TIME_EXTEND:
3038 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04003039 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003040 goto again;
3041
3042 case RINGBUF_TYPE_TIME_STAMP:
3043 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04003044 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003045 goto again;
3046
3047 case RINGBUF_TYPE_DATA:
3048 if (ts) {
3049 *ts = cpu_buffer->read_stamp + event->time_delta;
Robert Richterd8eeb2d2009-07-31 14:58:04 +02003050 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
Steven Rostedt37886f62009-03-17 17:22:06 -04003051 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003052 }
3053 return event;
3054
3055 default:
3056 BUG();
3057 }
3058
3059 return NULL;
3060}
Robert Richterc4f50182008-12-11 16:49:22 +01003061EXPORT_SYMBOL_GPL(ring_buffer_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003062
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003063static struct ring_buffer_event *
3064rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003065{
3066 struct ring_buffer *buffer;
3067 struct ring_buffer_per_cpu *cpu_buffer;
3068 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003069 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003070
3071 if (ring_buffer_iter_empty(iter))
3072 return NULL;
3073
3074 cpu_buffer = iter->cpu_buffer;
3075 buffer = cpu_buffer->buffer;
3076
3077 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003078 /*
Steven Rostedtea05b572009-06-03 09:30:10 -04003079 * We repeat when a timestamp is encountered.
3080 * We can get multiple timestamps by nested interrupts or also
3081 * if filtering is on (discarding commits). Since discarding
3082 * commits can be frequent we can get a lot of timestamps.
3083 * But we limit them by not adding timestamps if they begin
3084 * at the start of a page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003085 */
Steven Rostedtea05b572009-06-03 09:30:10 -04003086 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003087 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04003088
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003089 if (rb_per_cpu_empty(cpu_buffer))
3090 return NULL;
3091
3092 event = rb_iter_head_event(iter);
3093
Lai Jiangshan334d4162009-04-24 11:27:05 +08003094 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003095 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05003096 if (rb_null_event(event)) {
3097 rb_inc_iter(iter);
3098 goto again;
3099 }
3100 rb_advance_iter(iter);
3101 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003102
3103 case RINGBUF_TYPE_TIME_EXTEND:
3104 /* Internal data, OK to advance */
3105 rb_advance_iter(iter);
3106 goto again;
3107
3108 case RINGBUF_TYPE_TIME_STAMP:
3109 /* FIXME: not implemented */
3110 rb_advance_iter(iter);
3111 goto again;
3112
3113 case RINGBUF_TYPE_DATA:
3114 if (ts) {
3115 *ts = iter->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04003116 ring_buffer_normalize_time_stamp(buffer,
3117 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003118 }
3119 return event;
3120
3121 default:
3122 BUG();
3123 }
3124
3125 return NULL;
3126}
Robert Richterc4f50182008-12-11 16:49:22 +01003127EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003128
Steven Rostedt8d707e82009-06-16 21:22:48 -04003129static inline int rb_ok_to_lock(void)
3130{
3131 /*
3132 * If an NMI die dump is reading out the contents of the ring buffer,
3133 * do not grab locks. We also permanently disable the ring
3134 * buffer. A one-time read is all you get when reading
3135 * the ring buffer from an NMI.
3136 */
Steven Rostedt464e85e2009-08-05 15:26:37 -04003137 if (likely(!in_nmi()))
Steven Rostedt8d707e82009-06-16 21:22:48 -04003138 return 1;
3139
3140 tracing_off_permanent();
3141 return 0;
3142}
3143
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003144/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003145 * ring_buffer_peek - peek at the next event to be read
3146 * @buffer: The ring buffer to read
3147 * @cpu: The cpu to peek at
3148 * @ts: The timestamp counter of this event.
3149 *
3150 * This will return the event that will be read next, but does
3151 * not consume the data.
3152 */
3153struct ring_buffer_event *
3154ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
3155{
3156 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8aabee52009-03-12 13:13:49 -04003157 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003158 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04003159 int dolock;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003160
Steven Rostedt554f7862009-03-11 22:00:13 -04003161 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04003162 return NULL;
Steven Rostedt554f7862009-03-11 22:00:13 -04003163
Steven Rostedt8d707e82009-06-16 21:22:48 -04003164 dolock = rb_ok_to_lock();
Tom Zanussi2d622712009-03-22 03:30:49 -05003165 again:
Steven Rostedt8d707e82009-06-16 21:22:48 -04003166 local_irq_save(flags);
3167 if (dolock)
3168 spin_lock(&cpu_buffer->reader_lock);
Robert Richterd8eeb2d2009-07-31 14:58:04 +02003169 event = rb_buffer_peek(cpu_buffer, ts);
Robert Richter469535a2009-07-30 19:19:18 +02003170 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3171 rb_advance_reader(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04003172 if (dolock)
3173 spin_unlock(&cpu_buffer->reader_lock);
3174 local_irq_restore(flags);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003175
Steven Rostedt1b959e12009-09-03 10:12:13 -04003176 if (event && event->type_len == RINGBUF_TYPE_PADDING)
Tom Zanussi2d622712009-03-22 03:30:49 -05003177 goto again;
Tom Zanussi2d622712009-03-22 03:30:49 -05003178
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003179 return event;
3180}
3181
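/*
 * A minimal (hypothetical) caller of ring_buffer_peek(), only to
 * illustrate the calling convention.  It assumes the event accessor
 * ring_buffer_event_length() declared in <linux/ring_buffer.h>.
 */
static void __maybe_unused example_peek_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	event = ring_buffer_peek(buffer, cpu, &ts);
	if (!event)
		return;		/* that per cpu buffer is currently empty */

	/* the same event will be returned until somebody consumes it */
	pr_info("next event: ts=%llu len=%u\n",
		(unsigned long long)ts, ring_buffer_event_length(event));
}
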
3182/**
3183 * ring_buffer_iter_peek - peek at the next event to be read
3184 * @iter: The ring buffer iterator
3185 * @ts: The timestamp counter of this event.
3186 *
3187 * This will return the event that will be read next, but does
3188 * not increment the iterator.
3189 */
3190struct ring_buffer_event *
3191ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3192{
3193 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3194 struct ring_buffer_event *event;
3195 unsigned long flags;
3196
Tom Zanussi2d622712009-03-22 03:30:49 -05003197 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003198 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3199 event = rb_iter_peek(iter, ts);
3200 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3201
Steven Rostedt1b959e12009-09-03 10:12:13 -04003202 if (event && event->type_len == RINGBUF_TYPE_PADDING)
Tom Zanussi2d622712009-03-22 03:30:49 -05003203 goto again;
Tom Zanussi2d622712009-03-22 03:30:49 -05003204
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003205 return event;
3206}
3207
3208/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003209 * ring_buffer_consume - return an event and consume it
3210 * @buffer: The ring buffer to get the next event from
 * @cpu: The per cpu buffer to get the next event from
 * @ts: Where to store the timestamp of the event (may be NULL)
3211 *
3212 * Returns the next event in the ring buffer, and that event is consumed.
3213 * Meaning that sequential reads will keep returning a different event,
3214 * and eventually empty the ring buffer if the producer is slower.
3215 */
3216struct ring_buffer_event *
3217ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
3218{
Steven Rostedt554f7862009-03-11 22:00:13 -04003219 struct ring_buffer_per_cpu *cpu_buffer;
3220 struct ring_buffer_event *event = NULL;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003221 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04003222 int dolock;
3223
3224 dolock = rb_ok_to_lock();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003225
Tom Zanussi2d622712009-03-22 03:30:49 -05003226 again:
Steven Rostedt554f7862009-03-11 22:00:13 -04003227 /* might be called in atomic */
3228 preempt_disable();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003229
Steven Rostedt554f7862009-03-11 22:00:13 -04003230 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3231 goto out;
3232
3233 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04003234 local_irq_save(flags);
3235 if (dolock)
3236 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003237
Robert Richterd8eeb2d2009-07-31 14:58:04 +02003238 event = rb_buffer_peek(cpu_buffer, ts);
Robert Richter469535a2009-07-30 19:19:18 +02003239 if (event)
3240 rb_advance_reader(cpu_buffer);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003241
Steven Rostedt8d707e82009-06-16 21:22:48 -04003242 if (dolock)
3243 spin_unlock(&cpu_buffer->reader_lock);
3244 local_irq_restore(flags);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003245
Steven Rostedt554f7862009-03-11 22:00:13 -04003246 out:
3247 preempt_enable();
3248
Steven Rostedt1b959e12009-09-03 10:12:13 -04003249 if (event && event->type_len == RINGBUF_TYPE_PADDING)
Tom Zanussi2d622712009-03-22 03:30:49 -05003250 goto again;
Tom Zanussi2d622712009-03-22 03:30:49 -05003251
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003252 return event;
3253}
Robert Richterc4f50182008-12-11 16:49:22 +01003254EXPORT_SYMBOL_GPL(ring_buffer_consume);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003255
3256/**
3257 * ring_buffer_read_start - start a non consuming read of the buffer
3258 * @buffer: The ring buffer to read from
3259 * @cpu: The cpu buffer to iterate over
3260 *
3261 * This starts up an iteration through the buffer. It also disables
3262 * the recording to the buffer until the reading is finished.
3263 * This prevents the reading from being corrupted. This is not
3264 * a consuming read, so a producer is not expected.
3265 *
3266 * Must be paired with ring_buffer_read_finish.
3267 */
3268struct ring_buffer_iter *
3269ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3270{
3271 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04003272 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04003273 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003274
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303275 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04003276 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003277
3278 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3279 if (!iter)
Steven Rostedt8aabee52009-03-12 13:13:49 -04003280 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003281
3282 cpu_buffer = buffer->buffers[cpu];
3283
3284 iter->cpu_buffer = cpu_buffer;
3285
3286 atomic_inc(&cpu_buffer->record_disabled);
3287 synchronize_sched();
3288
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003289 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05003290 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05003291 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05003292 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003293 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003294
3295 return iter;
3296}
Robert Richterc4f50182008-12-11 16:49:22 +01003297EXPORT_SYMBOL_GPL(ring_buffer_read_start);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003298
3299/**
3300 * ring_buffer_read_finish - finish reading the iterator of the buffer
3301 * @iter: The iterator retrieved by ring_buffer_read_start
3302 *
3303 * This re-enables the recording to the buffer, and frees the
3304 * iterator.
3305 */
3306void
3307ring_buffer_read_finish(struct ring_buffer_iter *iter)
3308{
3309 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3310
3311 atomic_dec(&cpu_buffer->record_disabled);
3312 kfree(iter);
3313}
Robert Richterc4f50182008-12-11 16:49:22 +01003314EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003315
3316/**
3317 * ring_buffer_read - read the next item in the ring buffer by the iterator
3318 * @iter: The ring buffer iterator
3319 * @ts: The time stamp of the event read.
3320 *
3321 * This reads the next event in the ring buffer and increments the iterator.
3322 */
3323struct ring_buffer_event *
3324ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3325{
3326 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003327 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3328 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003329
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003330 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt7e9391c2009-09-03 10:02:09 -04003331 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003332 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003333 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003334 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003335
Steven Rostedt7e9391c2009-09-03 10:02:09 -04003336 if (event->type_len == RINGBUF_TYPE_PADDING)
3337 goto again;
3338
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003339 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003340 out:
3341 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003342
3343 return event;
3344}
Robert Richterc4f50182008-12-11 16:49:22 +01003345EXPORT_SYMBOL_GPL(ring_buffer_read);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003346
3347/**
3348 * ring_buffer_size - return the size of the ring buffer (in bytes)
3349 * @buffer: The ring buffer.
3350 */
3351unsigned long ring_buffer_size(struct ring_buffer *buffer)
3352{
3353 return BUF_PAGE_SIZE * buffer->pages;
3354}
Robert Richterc4f50182008-12-11 16:49:22 +01003355EXPORT_SYMBOL_GPL(ring_buffer_size);
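
/*
 * Note that the size reported above is for a single per cpu buffer (the
 * usable page payload times the number of pages), not the sum over all
 * CPUs.  As a rough example, assuming 4096-byte pages with a 16-byte
 * buffer_data_page header, a 64-page buffer reports
 * 64 * (4096 - 16) = 261120 bytes; the exact header size depends on the
 * architecture.
 */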
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003356
3357static void
3358rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3359{
Steven Rostedt77ae3652009-03-27 11:00:29 -04003360 rb_head_page_deactivate(cpu_buffer);
3361
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003362 cpu_buffer->head_page
Steven Rostedt3adc54f2009-03-30 15:32:01 -04003363 = list_entry(cpu_buffer->pages, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04003364 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04003365 local_set(&cpu_buffer->head_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05003366 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003367
Steven Rostedt6f807ac2008-10-04 02:00:58 -04003368 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04003369
3370 cpu_buffer->tail_page = cpu_buffer->head_page;
3371 cpu_buffer->commit_page = cpu_buffer->head_page;
3372
3373 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3374 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04003375 local_set(&cpu_buffer->reader_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05003376 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04003377 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04003378
Steven Rostedt77ae3652009-03-27 11:00:29 -04003379 local_set(&cpu_buffer->commit_overrun, 0);
3380 local_set(&cpu_buffer->overrun, 0);
Steven Rostedte4906ef2009-04-30 20:49:44 -04003381 local_set(&cpu_buffer->entries, 0);
Steven Rostedtfa743952009-06-16 12:37:57 -04003382 local_set(&cpu_buffer->committing, 0);
3383 local_set(&cpu_buffer->commits, 0);
Steven Rostedt77ae3652009-03-27 11:00:29 -04003384 cpu_buffer->read = 0;
Steven Rostedt69507c02009-01-21 18:45:57 -05003385
3386 cpu_buffer->write_stamp = 0;
3387 cpu_buffer->read_stamp = 0;
Steven Rostedt77ae3652009-03-27 11:00:29 -04003388
3389 rb_head_page_activate(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003390}
3391
3392/**
3393 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3394 * @buffer: The ring buffer to reset a per cpu buffer of
3395 * @cpu: The CPU buffer to be reset
3396 */
3397void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3398{
3399 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3400 unsigned long flags;
3401
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303402 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04003403 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003404
Steven Rostedt41ede232009-05-01 20:26:54 -04003405 atomic_inc(&cpu_buffer->record_disabled);
3406
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003407 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3408
Steven Rostedt41b6a952009-09-02 09:59:48 -04003409 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3410 goto out;
3411
Steven Rostedt3e03fb72008-11-06 00:09:43 -05003412 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003413
3414 rb_reset_cpu(cpu_buffer);
3415
Steven Rostedt3e03fb72008-11-06 00:09:43 -05003416 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003417
Steven Rostedt41b6a952009-09-02 09:59:48 -04003418 out:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01003419 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt41ede232009-05-01 20:26:54 -04003420
3421 atomic_dec(&cpu_buffer->record_disabled);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003422}
Robert Richterc4f50182008-12-11 16:49:22 +01003423EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003424
3425/**
3426 * ring_buffer_reset - reset a ring buffer
3427 * @buffer: The ring buffer to reset all cpu buffers
3428 */
3429void ring_buffer_reset(struct ring_buffer *buffer)
3430{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003431 int cpu;
3432
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003433 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04003434 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003435}
Robert Richterc4f50182008-12-11 16:49:22 +01003436EXPORT_SYMBOL_GPL(ring_buffer_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003437
3438/**
3439 * ring_buffer_empty - is the ring buffer empty?
3440 * @buffer: The ring buffer to test
3441 */
3442int ring_buffer_empty(struct ring_buffer *buffer)
3443{
3444 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtd4788202009-06-17 00:39:43 -04003445 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04003446 int dolock;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003447 int cpu;
Steven Rostedtd4788202009-06-17 00:39:43 -04003448 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003449
Steven Rostedt8d707e82009-06-16 21:22:48 -04003450 dolock = rb_ok_to_lock();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003451
3452 /* yes this is racy, but if you don't like the race, lock the buffer */
3453 for_each_buffer_cpu(buffer, cpu) {
3454 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04003455 local_irq_save(flags);
3456 if (dolock)
3457 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedtd4788202009-06-17 00:39:43 -04003458 ret = rb_per_cpu_empty(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04003459 if (dolock)
3460 spin_unlock(&cpu_buffer->reader_lock);
3461 local_irq_restore(flags);
3462
Steven Rostedtd4788202009-06-17 00:39:43 -04003463 if (!ret)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003464 return 0;
3465 }
Steven Rostedt554f7862009-03-11 22:00:13 -04003466
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003467 return 1;
3468}
Robert Richterc4f50182008-12-11 16:49:22 +01003469EXPORT_SYMBOL_GPL(ring_buffer_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003470
3471/**
3472 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
3473 * @buffer: The ring buffer
3474 * @cpu: The CPU buffer to test
3475 */
3476int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3477{
3478 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtd4788202009-06-17 00:39:43 -04003479 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04003480 int dolock;
Steven Rostedt8aabee52009-03-12 13:13:49 -04003481 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003482
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303483 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04003484 return 1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003485
Steven Rostedt8d707e82009-06-16 21:22:48 -04003486 dolock = rb_ok_to_lock();
Steven Rostedt554f7862009-03-11 22:00:13 -04003487
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003488 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04003489 local_irq_save(flags);
3490 if (dolock)
3491 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedt554f7862009-03-11 22:00:13 -04003492 ret = rb_per_cpu_empty(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04003493 if (dolock)
3494 spin_unlock(&cpu_buffer->reader_lock);
3495 local_irq_restore(flags);
Steven Rostedt554f7862009-03-11 22:00:13 -04003496
3497 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003498}
Robert Richterc4f50182008-12-11 16:49:22 +01003499EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003500
Steven Rostedt85bac322009-09-04 14:24:40 -04003501#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003502/**
3503 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
3504 * @buffer_a: One buffer to swap with
3505 * @buffer_b: The other buffer to swap with
 * @cpu: The per CPU buffer to swap
3506 *
3507 * This function is useful for tracers that want to take a "snapshot"
3508 * of a CPU buffer and have another backup buffer lying around.
3509 * It is expected that the tracer handles the cpu buffer not being
3510 * used at the moment.
3511 */
3512int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3513 struct ring_buffer *buffer_b, int cpu)
3514{
3515 struct ring_buffer_per_cpu *cpu_buffer_a;
3516 struct ring_buffer_per_cpu *cpu_buffer_b;
Steven Rostedt554f7862009-03-11 22:00:13 -04003517 int ret = -EINVAL;
3518
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303519 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3520 !cpumask_test_cpu(cpu, buffer_b->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04003521 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003522
3523 /* At least make sure the two buffers are somewhat the same */
Lai Jiangshan6d102bc2008-12-17 17:48:23 +08003524 if (buffer_a->pages != buffer_b->pages)
Steven Rostedt554f7862009-03-11 22:00:13 -04003525 goto out;
3526
3527 ret = -EAGAIN;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003528
Steven Rostedt97b17ef2009-01-21 15:24:56 -05003529 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedt554f7862009-03-11 22:00:13 -04003530 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05003531
3532 if (atomic_read(&buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04003533 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05003534
3535 if (atomic_read(&buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04003536 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05003537
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003538 cpu_buffer_a = buffer_a->buffers[cpu];
3539 cpu_buffer_b = buffer_b->buffers[cpu];
3540
Steven Rostedt97b17ef2009-01-21 15:24:56 -05003541 if (atomic_read(&cpu_buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04003542 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05003543
3544 if (atomic_read(&cpu_buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04003545 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05003546
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003547 /*
3548 * We can't do a synchronize_sched here because this
3549 * function can be called in atomic context.
3550 * Normally this will be called from the same CPU as cpu.
3551 * If not it's up to the caller to protect this.
3552 */
3553 atomic_inc(&cpu_buffer_a->record_disabled);
3554 atomic_inc(&cpu_buffer_b->record_disabled);
3555
Steven Rostedt98277992009-09-02 10:56:15 -04003556 ret = -EBUSY;
3557 if (local_read(&cpu_buffer_a->committing))
3558 goto out_dec;
3559 if (local_read(&cpu_buffer_b->committing))
3560 goto out_dec;
3561
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003562 buffer_a->buffers[cpu] = cpu_buffer_b;
3563 buffer_b->buffers[cpu] = cpu_buffer_a;
3564
3565 cpu_buffer_b->buffer = buffer_a;
3566 cpu_buffer_a->buffer = buffer_b;
3567
Steven Rostedt98277992009-09-02 10:56:15 -04003568 ret = 0;
3569
3570out_dec:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003571 atomic_dec(&cpu_buffer_a->record_disabled);
3572 atomic_dec(&cpu_buffer_b->record_disabled);
Steven Rostedt554f7862009-03-11 22:00:13 -04003573out:
Steven Rostedt554f7862009-03-11 22:00:13 -04003574 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003575}
Robert Richterc4f50182008-12-11 16:49:22 +01003576EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
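
/*
 * A minimal (hypothetical) "snapshot" style use of the swap: the caller
 * is assumed to have allocated a spare ring buffer of the same size up
 * front, and simply trades the per cpu buffer under the live one.
 */
static int __maybe_unused example_snapshot_cpu(struct ring_buffer *live,
					       struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret < 0)
		return ret;	/* -EINVAL, -EAGAIN or -EBUSY; try again later */

	/* "spare" now holds the events that were queued on "live" for @cpu */
	return 0;
}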
Steven Rostedt85bac322009-09-04 14:24:40 -04003577#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04003578
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003579/**
3580 * ring_buffer_alloc_read_page - allocate a page to read from buffer
3581 * @buffer: the buffer to allocate for.
3582 *
3583 * This function is used in conjunction with ring_buffer_read_page.
3584 * When reading a full page from the ring buffer, these functions
3585 * can be used to speed up the process. The calling function should
3586 * allocate a few pages first with this function. Then when it
3587 * needs to get pages from the ring buffer, it passes the result
3588 * of this function into ring_buffer_read_page, which will swap
3589 * the page that was allocated, with the read page of the buffer.
3590 *
3591 * Returns:
3592 * The page allocated, or NULL on error.
3593 */
3594void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
3595{
Steven Rostedt044fa782008-12-02 23:50:03 -05003596 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003597 unsigned long addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003598
3599 addr = __get_free_page(GFP_KERNEL);
3600 if (!addr)
3601 return NULL;
3602
Steven Rostedt044fa782008-12-02 23:50:03 -05003603 bpage = (void *)addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003604
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003605 rb_init_page(bpage);
3606
Steven Rostedt044fa782008-12-02 23:50:03 -05003607 return bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003608}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04003609EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003610
3611/**
3612 * ring_buffer_free_read_page - free an allocated read page
3613 * @buffer: the buffer the page was allocated for
3614 * @data: the page to free
3615 *
3616 * Free a page allocated from ring_buffer_alloc_read_page.
3617 */
3618void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3619{
3620 free_page((unsigned long)data);
3621}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04003622EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003623
3624/**
3625 * ring_buffer_read_page - extract a page from the ring buffer
3626 * @buffer: buffer to extract from
3627 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003628 * @len: amount to extract
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003629 * @cpu: the cpu of the buffer to extract
3630 * @full: should the extraction only happen when the page is full.
3631 *
3632 * This function will pull out a page from the ring buffer and consume it.
3633 * @data_page must be the address of the variable holding the page returned
3634 * from ring_buffer_alloc_read_page. This is because the page might be used
3635 * to swap with a page in the ring buffer.
3636 *
3637 * for example:
Lai Jiangshanb85fa012009-02-09 14:21:14 +08003638 * rpage = ring_buffer_alloc_read_page(buffer);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003639 * if (!rpage)
3640 * return error;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003641 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
Lai Jiangshan667d2412009-02-09 14:21:17 +08003642 * if (ret >= 0)
3643 * process_page(rpage, ret);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003644 *
3645 * When @full is set, the read will fail (return -1) unless
3646 * the writer is completely off the reader page.
3647 *
3648 * Note: it is up to the calling functions to handle sleeps and wakeups.
3649 * The ring buffer can be used anywhere in the kernel and can not
3650 * blindly call wake_up. The layer that uses the ring buffer must be
3651 * responsible for that.
3652 *
3653 * Returns:
Lai Jiangshan667d2412009-02-09 14:21:17 +08003654 * >=0 if data has been transferred, returns the offset of consumed data.
3655 * <0 if no data has been transferred.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003656 */
3657int ring_buffer_read_page(struct ring_buffer *buffer,
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003658 void **data_page, size_t len, int cpu, int full)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003659{
3660 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3661 struct ring_buffer_event *event;
Steven Rostedt044fa782008-12-02 23:50:03 -05003662 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003663 struct buffer_page *reader;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003664 unsigned long flags;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003665 unsigned int commit;
Lai Jiangshan667d2412009-02-09 14:21:17 +08003666 unsigned int read;
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003667 u64 save_timestamp;
Lai Jiangshan667d2412009-02-09 14:21:17 +08003668 int ret = -1;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003669
Steven Rostedt554f7862009-03-11 22:00:13 -04003670 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3671 goto out;
3672
Steven Rostedt474d32b2009-03-03 19:51:40 -05003673 /*
3674 * If len is not big enough to hold the page header, then
3675 * we can not copy anything.
3676 */
3677 if (len <= BUF_PAGE_HDR_SIZE)
Steven Rostedt554f7862009-03-11 22:00:13 -04003678 goto out;
Steven Rostedt474d32b2009-03-03 19:51:40 -05003679
3680 len -= BUF_PAGE_HDR_SIZE;
3681
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003682 if (!data_page)
Steven Rostedt554f7862009-03-11 22:00:13 -04003683 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003684
Steven Rostedt044fa782008-12-02 23:50:03 -05003685 bpage = *data_page;
3686 if (!bpage)
Steven Rostedt554f7862009-03-11 22:00:13 -04003687 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003688
3689 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3690
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003691 reader = rb_get_reader_page(cpu_buffer);
3692 if (!reader)
Steven Rostedt554f7862009-03-11 22:00:13 -04003693 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003694
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003695 event = rb_reader_event(cpu_buffer);
Lai Jiangshan667d2412009-02-09 14:21:17 +08003696
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003697 read = reader->read;
3698 commit = rb_page_commit(reader);
3699
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003700 /*
Steven Rostedt474d32b2009-03-03 19:51:40 -05003701 * If this page has been partially read or
3702 * if len is not big enough to read the rest of the page or
3703 * a writer is still on the page, then
3704 * we must copy the data from the page to the buffer.
3705 * Otherwise, we can simply swap the page with the one passed in.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003706 */
Steven Rostedt474d32b2009-03-03 19:51:40 -05003707 if (read || (len < (commit - read)) ||
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003708 cpu_buffer->reader_page == cpu_buffer->commit_page) {
Lai Jiangshan667d2412009-02-09 14:21:17 +08003709 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
Steven Rostedt474d32b2009-03-03 19:51:40 -05003710 unsigned int rpos = read;
3711 unsigned int pos = 0;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003712 unsigned int size;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003713
3714 if (full)
Steven Rostedt554f7862009-03-11 22:00:13 -04003715 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003716
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003717 if (len > (commit - read))
3718 len = (commit - read);
3719
3720 size = rb_event_length(event);
3721
3722 if (len < size)
Steven Rostedt554f7862009-03-11 22:00:13 -04003723 goto out_unlock;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003724
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003725 /* save the current timestamp, since the user will need it */
3726 save_timestamp = cpu_buffer->read_stamp;
3727
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003728 /* Need to copy one event at a time */
3729 do {
Steven Rostedt474d32b2009-03-03 19:51:40 -05003730 memcpy(bpage->data + pos, rpage->data + rpos, size);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003731
3732 len -= size;
3733
3734 rb_advance_reader(cpu_buffer);
Steven Rostedt474d32b2009-03-03 19:51:40 -05003735 rpos = reader->read;
3736 pos += size;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003737
3738 event = rb_reader_event(cpu_buffer);
3739 size = rb_event_length(event);
3740 } while (len > size);
Lai Jiangshan667d2412009-02-09 14:21:17 +08003741
3742 /* update bpage */
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003743 local_set(&bpage->commit, pos);
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003744 bpage->time_stamp = save_timestamp;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003745
Steven Rostedt474d32b2009-03-03 19:51:40 -05003746 /* we copied everything to the beginning */
3747 read = 0;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003748 } else {
Steven Rostedtafbab762009-05-01 19:40:05 -04003749 /* update the entry counter */
Steven Rostedt77ae3652009-03-27 11:00:29 -04003750 cpu_buffer->read += rb_page_entries(reader);
Steven Rostedtafbab762009-05-01 19:40:05 -04003751
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003752 /* swap the pages */
Steven Rostedt044fa782008-12-02 23:50:03 -05003753 rb_init_page(bpage);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003754 bpage = reader->page;
3755 reader->page = *data_page;
3756 local_set(&reader->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04003757 local_set(&reader->entries, 0);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003758 reader->read = 0;
Steven Rostedt044fa782008-12-02 23:50:03 -05003759 *data_page = bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003760 }
Lai Jiangshan667d2412009-02-09 14:21:17 +08003761 ret = read;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003762
Steven Rostedt554f7862009-03-11 22:00:13 -04003763 out_unlock:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003764 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3765
Steven Rostedt554f7862009-03-11 22:00:13 -04003766 out:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003767 return ret;
3768}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04003769EXPORT_SYMBOL_GPL(ring_buffer_read_page);
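
/*
 * A minimal (hypothetical) page-at-a-time reader tying the three calls
 * above together.  PAGE_SIZE is passed as @len on the assumption that the
 * caller wants as much of the page as the header allows.
 */
static void __maybe_unused example_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer);
	if (!page)
		return;

	/* @full == 0: extract even if the page is not completely full */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		pr_info("consumed data starts at offset %d of the page\n", ret);

	ring_buffer_free_read_page(buffer, page);
}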
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003770
Paul Mundt1155de42009-06-25 14:30:12 +09003771#ifdef CONFIG_TRACING
Steven Rostedta3583242008-11-11 15:01:42 -05003772static ssize_t
3773rb_simple_read(struct file *filp, char __user *ubuf,
3774 size_t cnt, loff_t *ppos)
3775{
Hannes Eder5e398412009-02-10 19:44:34 +01003776 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05003777 char buf[64];
3778 int r;
3779
Steven Rostedt033601a2008-11-21 12:41:55 -05003780 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3781 r = sprintf(buf, "permanently disabled\n");
3782 else
3783 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
Steven Rostedta3583242008-11-11 15:01:42 -05003784
3785 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3786}
3787
3788static ssize_t
3789rb_simple_write(struct file *filp, const char __user *ubuf,
3790 size_t cnt, loff_t *ppos)
3791{
Hannes Eder5e398412009-02-10 19:44:34 +01003792 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05003793 char buf[64];
Hannes Eder5e398412009-02-10 19:44:34 +01003794 unsigned long val;
Steven Rostedta3583242008-11-11 15:01:42 -05003795 int ret;
3796
3797 if (cnt >= sizeof(buf))
3798 return -EINVAL;
3799
3800 if (copy_from_user(&buf, ubuf, cnt))
3801 return -EFAULT;
3802
3803 buf[cnt] = 0;
3804
3805 ret = strict_strtoul(buf, 10, &val);
3806 if (ret < 0)
3807 return ret;
3808
Steven Rostedt033601a2008-11-21 12:41:55 -05003809 if (val)
3810 set_bit(RB_BUFFERS_ON_BIT, p);
3811 else
3812 clear_bit(RB_BUFFERS_ON_BIT, p);
Steven Rostedta3583242008-11-11 15:01:42 -05003813
3814 (*ppos)++;
3815
3816 return cnt;
3817}
3818
Steven Rostedt5e2336a02009-03-05 21:44:55 -05003819static const struct file_operations rb_simple_fops = {
Steven Rostedta3583242008-11-11 15:01:42 -05003820 .open = tracing_open_generic,
3821 .read = rb_simple_read,
3822 .write = rb_simple_write,
3823};
3824
3825
3826static __init int rb_init_debugfs(void)
3827{
3828 struct dentry *d_tracer;
Steven Rostedta3583242008-11-11 15:01:42 -05003829
3830 d_tracer = tracing_init_dentry();
3831
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003832 trace_create_file("tracing_on", 0644, d_tracer,
3833 &ring_buffer_flags, &rb_simple_fops);
Steven Rostedta3583242008-11-11 15:01:42 -05003834
3835 return 0;
3836}
3837
3838fs_initcall(rb_init_debugfs);
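
/*
 * From user space the control file created above is reached through the
 * tracing debugfs directory, typically:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	(stop recording)
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	(resume recording)
 *
 * The exact path depends on where debugfs is mounted; the file reports
 * "permanently disabled" once RB_BUFFERS_DISABLED_BIT has been set.
 */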
Paul Mundt1155de42009-06-25 14:30:12 +09003839#endif
Steven Rostedt554f7862009-03-11 22:00:13 -04003840
Steven Rostedt59222ef2009-03-12 11:46:03 -04003841#ifdef CONFIG_HOTPLUG_CPU
Frederic Weisbecker09c9e842009-03-21 04:33:36 +01003842static int rb_cpu_notify(struct notifier_block *self,
3843 unsigned long action, void *hcpu)
Steven Rostedt554f7862009-03-11 22:00:13 -04003844{
3845 struct ring_buffer *buffer =
3846 container_of(self, struct ring_buffer, cpu_notify);
3847 long cpu = (long)hcpu;
3848
3849 switch (action) {
3850 case CPU_UP_PREPARE:
3851 case CPU_UP_PREPARE_FROZEN:
Rusty Russell3f237a72009-06-12 21:15:30 +09303852 if (cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04003853 return NOTIFY_OK;
3854
3855 buffer->buffers[cpu] =
3856 rb_allocate_cpu_buffer(buffer, cpu);
3857 if (!buffer->buffers[cpu]) {
3858 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3859 cpu);
3860 return NOTIFY_OK;
3861 }
3862 smp_wmb();
Rusty Russell3f237a72009-06-12 21:15:30 +09303863 cpumask_set_cpu(cpu, buffer->cpumask);
Steven Rostedt554f7862009-03-11 22:00:13 -04003864 break;
3865 case CPU_DOWN_PREPARE:
3866 case CPU_DOWN_PREPARE_FROZEN:
3867 /*
3868 * Do nothing.
3869 * If we were to free the buffer, then the user would
3870 * lose any trace that was in the buffer.
3871 */
3872 break;
3873 default:
3874 break;
3875 }
3876 return NOTIFY_OK;
3877}
3878#endif