/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must keep it up to date manually.
 */
28int ring_buffer_print_entry_header(struct trace_seq *s)
29{
30 int ret;
31
Lai Jiangshan334d4162009-04-24 11:27:05 +080032 ret = trace_seq_printf(s, "# compressed entry header\n");
33 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
Steven Rostedtd1b182a2009-04-15 16:53:47 -040034 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
35 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
36 ret = trace_seq_printf(s, "\n");
37 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
38 RINGBUF_TYPE_PADDING);
39 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
40 RINGBUF_TYPE_TIME_EXTEND);
Lai Jiangshan334d4162009-04-24 11:27:05 +080041 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
42 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
Steven Rostedtd1b182a2009-04-15 16:53:47 -040043
44 return ret;
45}
46
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                               |
 *      |                               |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
114
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer (see the illustrative sketch after
 * tracing_is_on() below).
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */
132
133/*
134 * Global flag to disable all recording to ring buffers
135 * This has two bits: ON, DISABLED
136 *
137 * ON DISABLED
138 * ---- ----------
139 * 0 0 : ring buffers are off
140 * 1 0 : ring buffers are on
141 * X 1 : ring buffers are permanently disabled
142 */
143
144enum {
145 RB_BUFFERS_ON_BIT = 0,
146 RB_BUFFERS_DISABLED_BIT = 1,
147};
148
149enum {
150 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
151 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
152};
153
Hannes Eder5e398412009-02-10 19:44:34 +0100154static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
Steven Rostedta3583242008-11-11 15:01:42 -0500155
Steven Rostedt474d32b2009-03-03 19:51:40 -0500156#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
157
Steven Rostedta3583242008-11-11 15:01:42 -0500158/**
159 * tracing_on - enable all tracing buffers
160 *
161 * This function enables all tracing buffers that may have been
162 * disabled with tracing_off.
163 */
164void tracing_on(void)
165{
Steven Rostedt033601a2008-11-21 12:41:55 -0500166 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
Steven Rostedta3583242008-11-11 15:01:42 -0500167}
Robert Richterc4f50182008-12-11 16:49:22 +0100168EXPORT_SYMBOL_GPL(tracing_on);
Steven Rostedta3583242008-11-11 15:01:42 -0500169
170/**
171 * tracing_off - turn off all tracing buffers
172 *
173 * This function stops all tracing buffers from recording data.
174 * It does not disable any overhead the tracers themselves may
175 * be causing. This function simply causes all recording to
176 * the ring buffers to fail.
177 */
178void tracing_off(void)
179{
Steven Rostedt033601a2008-11-21 12:41:55 -0500180 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181}
Robert Richterc4f50182008-12-11 16:49:22 +0100182EXPORT_SYMBOL_GPL(tracing_off);
Steven Rostedt033601a2008-11-21 12:41:55 -0500183
184/**
185 * tracing_off_permanent - permanently disable ring buffers
186 *
187 * This function, once called, will disable all ring buffers
Wenji Huangc3706f02009-02-10 01:03:18 -0500188 * permanently.
Steven Rostedt033601a2008-11-21 12:41:55 -0500189 */
190void tracing_off_permanent(void)
191{
192 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
Steven Rostedta3583242008-11-11 15:01:42 -0500193}
194
Steven Rostedt988ae9d2009-02-14 19:17:02 -0500195/**
196 * tracing_is_on - show state of ring buffers enabled
197 */
198int tracing_is_on(void)
199{
200 return ring_buffer_flags == RB_BUFFERS_ON;
201}
202EXPORT_SYMBOL_GPL(tracing_is_on);
203
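/*
 * Illustrative sketch (not part of the original file): the three layers
 * described in the comment above mean a write only succeeds when all of
 * them are enabled, roughly:
 *
 *	tracing_on();					(1) global flag
 *	ring_buffer_record_enable(buffer);		(2) the buffer itself
 *	ring_buffer_record_enable_cpu(buffer, cpu);	(3) this cpu's buffer
 *
 * ring_buffer_record_enable() and ring_buffer_record_enable_cpu() are the
 * enable paths implemented later in this file; the snippet only shows how
 * the layers stack.
 */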
205
Steven Rostedte3d6bf02009-03-03 13:53:07 -0500206#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
Andrew Morton67d34722009-01-09 12:27:09 -0800207#define RB_ALIGNMENT 4U
Lai Jiangshan334d4162009-04-24 11:27:05 +0800208#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
Steven Rostedtc7b09302009-06-11 11:12:00 -0400209#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
Lai Jiangshan334d4162009-04-24 11:27:05 +0800210
211/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
212#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400213
214enum {
215 RB_LEN_TIME_EXTEND = 8,
216 RB_LEN_TIME_STAMP = 16,
217};
218
Tom Zanussi2d622712009-03-22 03:30:49 -0500219static inline int rb_null_event(struct ring_buffer_event *event)
220{
Lai Jiangshan334d4162009-04-24 11:27:05 +0800221 return event->type_len == RINGBUF_TYPE_PADDING
222 && event->time_delta == 0;
Tom Zanussi2d622712009-03-22 03:30:49 -0500223}
224
225static inline int rb_discarded_event(struct ring_buffer_event *event)
226{
Lai Jiangshan334d4162009-04-24 11:27:05 +0800227 return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
Tom Zanussi2d622712009-03-22 03:30:49 -0500228}
229
230static void rb_event_set_padding(struct ring_buffer_event *event)
231{
Lai Jiangshan334d4162009-04-24 11:27:05 +0800232 event->type_len = RINGBUF_TYPE_PADDING;
Tom Zanussi2d622712009-03-22 03:30:49 -0500233 event->time_delta = 0;
234}
235
Tom Zanussi2d622712009-03-22 03:30:49 -0500236static unsigned
237rb_event_data_length(struct ring_buffer_event *event)
238{
239 unsigned length;
240
Lai Jiangshan334d4162009-04-24 11:27:05 +0800241 if (event->type_len)
242 length = event->type_len * RB_ALIGNMENT;
Tom Zanussi2d622712009-03-22 03:30:49 -0500243 else
244 length = event->array[0];
245 return length + RB_EVNT_HDR_SIZE;
246}
247
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400248/* inline for ring buffer fast paths */
Andrew Morton34a148b2009-01-09 12:27:09 -0800249static unsigned
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400250rb_event_length(struct ring_buffer_event *event)
251{
Lai Jiangshan334d4162009-04-24 11:27:05 +0800252 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400253 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -0500254 if (rb_null_event(event))
255 /* undefined */
256 return -1;
Lai Jiangshan334d4162009-04-24 11:27:05 +0800257 return event->array[0] + RB_EVNT_HDR_SIZE;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400258
259 case RINGBUF_TYPE_TIME_EXTEND:
260 return RB_LEN_TIME_EXTEND;
261
262 case RINGBUF_TYPE_TIME_STAMP:
263 return RB_LEN_TIME_STAMP;
264
265 case RINGBUF_TYPE_DATA:
Tom Zanussi2d622712009-03-22 03:30:49 -0500266 return rb_event_data_length(event);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400267 default:
268 BUG();
269 }
270 /* not hit */
271 return 0;
272}
273
274/**
275 * ring_buffer_event_length - return the length of the event
276 * @event: the event to get the length of
277 */
278unsigned ring_buffer_event_length(struct ring_buffer_event *event)
279{
Robert Richter465634a2009-01-07 15:32:11 +0100280 unsigned length = rb_event_length(event);
Lai Jiangshan334d4162009-04-24 11:27:05 +0800281 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
Robert Richter465634a2009-01-07 15:32:11 +0100282 return length;
283 length -= RB_EVNT_HDR_SIZE;
284 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
285 length -= sizeof(event->array[0]);
286 return length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400287}
Robert Richterc4f50182008-12-11 16:49:22 +0100288EXPORT_SYMBOL_GPL(ring_buffer_event_length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400289
290/* inline for ring buffer fast paths */
Andrew Morton34a148b2009-01-09 12:27:09 -0800291static void *
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400292rb_event_data(struct ring_buffer_event *event)
293{
Lai Jiangshan334d4162009-04-24 11:27:05 +0800294 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400295 /* If length is in len field, then array[0] has the data */
Lai Jiangshan334d4162009-04-24 11:27:05 +0800296 if (event->type_len)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400297 return (void *)&event->array[0];
298 /* Otherwise length is in array[0] and array[1] has the data */
299 return (void *)&event->array[1];
300}
301
302/**
303 * ring_buffer_event_data - return the data of the event
304 * @event: the event to get the data from
305 */
306void *ring_buffer_event_data(struct ring_buffer_event *event)
307{
308 return rb_event_data(event);
309}
Robert Richterc4f50182008-12-11 16:49:22 +0100310EXPORT_SYMBOL_GPL(ring_buffer_event_data);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400311
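/*
 * Illustrative sketch (not part of the original file): a consumer that has
 * been handed an event typically pairs the two accessors above:
 *
 *	unsigned length = ring_buffer_event_length(event);
 *	void *body	= ring_buffer_event_data(event);
 *
 * body points at the payload whether it was stored right after the header
 * (type_len != 0) or after the length word in array[0]; length is the
 * payload size, possibly rounded up to the 4 byte event alignment.
 */
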
312#define for_each_buffer_cpu(buffer, cpu) \
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030313 for_each_cpu(cpu, buffer->cpumask)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400314
315#define TS_SHIFT 27
316#define TS_MASK ((1ULL << TS_SHIFT) - 1)
317#define TS_DELTA_TEST (~TS_MASK)
318
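/*
 * Rough arithmetic (assuming the default nanosecond-resolution clock): a
 * 27 bit delta can hold at most 2^27 - 1 = 134,217,727 units, so roughly
 * 134 ms may pass between events before a TIME_EXTEND event has to be
 * inserted to carry the larger delta.
 */
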
Steven Rostedtabc9b562008-12-02 15:34:06 -0500319struct buffer_data_page {
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400320 u64 time_stamp; /* page time stamp */
Wenji Huangc3706f02009-02-10 01:03:18 -0500321 local_t commit; /* write committed index */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500322 unsigned char data[]; /* data of buffer page */
323};
324
325struct buffer_page {
Steven Rostedt778c55d2009-05-01 18:44:45 -0400326 struct list_head list; /* list of buffer pages */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500327 local_t write; /* index for next write */
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400328 unsigned read; /* index for next read */
Steven Rostedt778c55d2009-05-01 18:44:45 -0400329 local_t entries; /* entries on this page */
Steven Rostedtabc9b562008-12-02 15:34:06 -0500330 struct buffer_data_page *page; /* Actual data page */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400331};
332
Steven Rostedt044fa782008-12-02 23:50:03 -0500333static void rb_init_page(struct buffer_data_page *bpage)
Steven Rostedtabc9b562008-12-02 15:34:06 -0500334{
Steven Rostedt044fa782008-12-02 23:50:03 -0500335 local_set(&bpage->commit, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -0500336}
337
Steven Rostedt474d32b2009-03-03 19:51:40 -0500338/**
339 * ring_buffer_page_len - the size of data on the page.
340 * @page: The page to read
341 *
342 * Returns the amount of data on the page, including buffer page header.
343 */
Steven Rostedtef7a4a12009-03-03 00:27:49 -0500344size_t ring_buffer_page_len(void *page)
345{
Steven Rostedt474d32b2009-03-03 19:51:40 -0500346 return local_read(&((struct buffer_data_page *)page)->commit)
347 + BUF_PAGE_HDR_SIZE;
Steven Rostedtef7a4a12009-03-03 00:27:49 -0500348}
349
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400350/*
Steven Rostedted568292008-09-29 23:02:40 -0400351 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
352 * this issue out.
353 */
Andrew Morton34a148b2009-01-09 12:27:09 -0800354static void free_buffer_page(struct buffer_page *bpage)
Steven Rostedted568292008-09-29 23:02:40 -0400355{
Andrew Morton34a148b2009-01-09 12:27:09 -0800356 free_page((unsigned long)bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400357 kfree(bpage);
Steven Rostedted568292008-09-29 23:02:40 -0400358}
359
360/*
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400361 * We need to fit the time_stamp delta into 27 bits.
362 */
363static inline int test_time_stamp(u64 delta)
364{
365 if (delta & TS_DELTA_TEST)
366 return 1;
367 return 0;
368}
369
Steven Rostedt474d32b2009-03-03 19:51:40 -0500370#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400371
/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
373#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
374
Steven Rostedtea05b572009-06-03 09:30:10 -0400375/* Max number of timestamps that can fit on a page */
376#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
377
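/*
 * Worked numbers (illustrative, for a 64-bit build with 4 KiB pages):
 * BUF_PAGE_HDR_SIZE is 16 bytes (u64 time_stamp + local_t commit), so
 * BUF_PAGE_SIZE is 4096 - 16 = 4080 bytes of event space per page, and
 * BUF_MAX_DATA_SIZE is 4080 - 8 = 4072 bytes for a single event payload.
 */
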
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400378int ring_buffer_print_page_header(struct trace_seq *s)
379{
380 struct buffer_data_page field;
381 int ret;
382
383 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
384 "offset:0;\tsize:%u;\n",
385 (unsigned int)sizeof(field.time_stamp));
386
387 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
388 "offset:%u;\tsize:%u;\n",
389 (unsigned int)offsetof(typeof(field), commit),
390 (unsigned int)sizeof(field.commit));
391
392 ret = trace_seq_printf(s, "\tfield: char data;\t"
393 "offset:%u;\tsize:%u;\n",
394 (unsigned int)offsetof(typeof(field), data),
395 (unsigned int)BUF_PAGE_SIZE);
396
397 return ret;
398}
399
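/*
 * For reference (illustrative, 64-bit build with 4 KiB pages), the page
 * header description printed above comes out roughly as:
 *
 *	field: u64 timestamp;	offset:0;	size:8;
 *	field: local_t commit;	offset:8;	size:8;
 *	field: char data;	offset:16;	size:4080;
 */
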
/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
403struct ring_buffer_per_cpu {
404 int cpu;
405 struct ring_buffer *buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +0100406 spinlock_t reader_lock; /* serialize readers */
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500407 raw_spinlock_t lock;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400408 struct lock_class_key lock_key;
409 struct list_head pages;
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400410 struct buffer_page *head_page; /* read from head */
411 struct buffer_page *tail_page; /* write to tail */
Wenji Huangc3706f02009-02-10 01:03:18 -0500412 struct buffer_page *commit_page; /* committed pages */
Steven Rostedtd7690412008-10-01 00:29:53 -0400413 struct buffer_page *reader_page;
Steven Rostedtf0d2c682009-04-29 13:43:37 -0400414 unsigned long nmi_dropped;
415 unsigned long commit_overrun;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400416 unsigned long overrun;
Steven Rostedte4906ef2009-04-30 20:49:44 -0400417 unsigned long read;
418 local_t entries;
Steven Rostedtfa743952009-06-16 12:37:57 -0400419 local_t committing;
420 local_t commits;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400421 u64 write_stamp;
422 u64 read_stamp;
423 atomic_t record_disabled;
424};
425
426struct ring_buffer {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400427 unsigned pages;
428 unsigned flags;
429 int cpus;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400430 atomic_t record_disabled;
Arnaldo Carvalho de Melo00f62f62009-02-09 17:04:06 -0200431 cpumask_var_t cpumask;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400432
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +0200433 struct lock_class_key *reader_lock_key;
434
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400435 struct mutex mutex;
436
437 struct ring_buffer_per_cpu **buffers;
Steven Rostedt554f7862009-03-11 22:00:13 -0400438
Steven Rostedt59222ef2009-03-12 11:46:03 -0400439#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -0400440 struct notifier_block cpu_notify;
441#endif
Steven Rostedt37886f62009-03-17 17:22:06 -0400442 u64 (*clock)(void);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400443};
444
445struct ring_buffer_iter {
446 struct ring_buffer_per_cpu *cpu_buffer;
447 unsigned long head;
448 struct buffer_page *head_page;
449 u64 read_stamp;
450};
451
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500452/* buffer may be either ring_buffer or ring_buffer_per_cpu */
Steven Rostedtbf41a152008-10-04 02:00:59 -0400453#define RB_WARN_ON(buffer, cond) \
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500454 ({ \
455 int _____ret = unlikely(cond); \
456 if (_____ret) { \
Steven Rostedtbf41a152008-10-04 02:00:59 -0400457 atomic_inc(&buffer->record_disabled); \
458 WARN_ON(1); \
459 } \
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500460 _____ret; \
461 })
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500462
Steven Rostedt37886f62009-03-17 17:22:06 -0400463/* Up this if you want to test the TIME_EXTENTS and normalization */
464#define DEBUG_SHIFT 0
465
Steven Rostedt88eb0122009-05-11 16:28:23 -0400466static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
467{
468 /* shift to debug/test normalization and TIME_EXTENTS */
469 return buffer->clock() << DEBUG_SHIFT;
470}
471
Steven Rostedt37886f62009-03-17 17:22:06 -0400472u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
473{
474 u64 time;
475
476 preempt_disable_notrace();
Steven Rostedt88eb0122009-05-11 16:28:23 -0400477 time = rb_time_stamp(buffer, cpu);
Steven Rostedt37886f62009-03-17 17:22:06 -0400478 preempt_enable_no_resched_notrace();
479
480 return time;
481}
482EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
483
484void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
485 int cpu, u64 *ts)
486{
487 /* Just stupid testing the normalize function and deltas */
488 *ts >>= DEBUG_SHIFT;
489}
490EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
491
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400492/**
493 * check_pages - integrity check of buffer pages
494 * @cpu_buffer: CPU buffer with pages to test
495 *
Wenji Huangc3706f02009-02-10 01:03:18 -0500496 * As a safety measure we check to make sure the data pages have not
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400497 * been corrupted.
498 */
499static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
500{
501 struct list_head *head = &cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500502 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400503
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500504 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
505 return -1;
506 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
507 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400508
Steven Rostedt044fa782008-12-02 23:50:03 -0500509 list_for_each_entry_safe(bpage, tmp, head, list) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500510 if (RB_WARN_ON(cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500511 bpage->list.next->prev != &bpage->list))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500512 return -1;
513 if (RB_WARN_ON(cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500514 bpage->list.prev->next != &bpage->list))
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500515 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400516 }
517
518 return 0;
519}
520
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400521static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
522 unsigned nr_pages)
523{
524 struct list_head *head = &cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500525 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400526 unsigned long addr;
527 LIST_HEAD(pages);
528 unsigned i;
529
530 for (i = 0; i < nr_pages; i++) {
Steven Rostedt044fa782008-12-02 23:50:03 -0500531 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
Steven Rostedtaa1e0e32008-10-02 19:18:09 -0400532 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -0500533 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400534 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500535 list_add(&bpage->list, &pages);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400536
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400537 addr = __get_free_page(GFP_KERNEL);
538 if (!addr)
539 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500540 bpage->page = (void *)addr;
541 rb_init_page(bpage->page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400542 }
543
544 list_splice(&pages, head);
545
546 rb_check_pages(cpu_buffer);
547
548 return 0;
549
550 free_pages:
Steven Rostedt044fa782008-12-02 23:50:03 -0500551 list_for_each_entry_safe(bpage, tmp, &pages, list) {
552 list_del_init(&bpage->list);
553 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400554 }
555 return -ENOMEM;
556}
557
558static struct ring_buffer_per_cpu *
559rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
560{
561 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt044fa782008-12-02 23:50:03 -0500562 struct buffer_page *bpage;
Steven Rostedtd7690412008-10-01 00:29:53 -0400563 unsigned long addr;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400564 int ret;
565
566 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
567 GFP_KERNEL, cpu_to_node(cpu));
568 if (!cpu_buffer)
569 return NULL;
570
571 cpu_buffer->cpu = cpu;
572 cpu_buffer->buffer = buffer;
Steven Rostedtf83c9d02008-11-11 18:47:44 +0100573 spin_lock_init(&cpu_buffer->reader_lock);
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +0200574 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
Steven Rostedt3e03fb72008-11-06 00:09:43 -0500575 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400576 INIT_LIST_HEAD(&cpu_buffer->pages);
577
Steven Rostedt044fa782008-12-02 23:50:03 -0500578 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400579 GFP_KERNEL, cpu_to_node(cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -0500580 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400581 goto fail_free_buffer;
582
Steven Rostedt044fa782008-12-02 23:50:03 -0500583 cpu_buffer->reader_page = bpage;
Steven Rostedtd7690412008-10-01 00:29:53 -0400584 addr = __get_free_page(GFP_KERNEL);
585 if (!addr)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400586 goto fail_free_reader;
Steven Rostedt044fa782008-12-02 23:50:03 -0500587 bpage->page = (void *)addr;
588 rb_init_page(bpage->page);
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400589
Steven Rostedtd7690412008-10-01 00:29:53 -0400590 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
Steven Rostedtd7690412008-10-01 00:29:53 -0400591
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400592 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
593 if (ret < 0)
Steven Rostedtd7690412008-10-01 00:29:53 -0400594 goto fail_free_reader;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400595
596 cpu_buffer->head_page
597 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400598 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400599
600 return cpu_buffer;
601
Steven Rostedtd7690412008-10-01 00:29:53 -0400602 fail_free_reader:
603 free_buffer_page(cpu_buffer->reader_page);
604
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400605 fail_free_buffer:
606 kfree(cpu_buffer);
607 return NULL;
608}
609
610static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
611{
612 struct list_head *head = &cpu_buffer->pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500613 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400614
Steven Rostedtd7690412008-10-01 00:29:53 -0400615 free_buffer_page(cpu_buffer->reader_page);
616
Steven Rostedt044fa782008-12-02 23:50:03 -0500617 list_for_each_entry_safe(bpage, tmp, head, list) {
618 list_del_init(&bpage->list);
619 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400620 }
621 kfree(cpu_buffer);
622}
623
Steven Rostedt59222ef2009-03-12 11:46:03 -0400624#ifdef CONFIG_HOTPLUG_CPU
Frederic Weisbecker09c9e842009-03-21 04:33:36 +0100625static int rb_cpu_notify(struct notifier_block *self,
626 unsigned long action, void *hcpu);
Steven Rostedt554f7862009-03-11 22:00:13 -0400627#endif
628
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400629/**
630 * ring_buffer_alloc - allocate a new ring_buffer
Robert Richter68814b52008-11-24 12:24:12 +0100631 * @size: the size in bytes per cpu that is needed.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400632 * @flags: attributes to set for the ring buffer.
633 *
634 * Currently the only flag that is available is the RB_FL_OVERWRITE
635 * flag. This flag means that the buffer will overwrite old data
636 * when the buffer wraps. If this flag is not set, the buffer will
637 * drop data when the tail hits the head.
638 */
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +0200639struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
640 struct lock_class_key *key)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400641{
642 struct ring_buffer *buffer;
643 int bsize;
644 int cpu;
645
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400646 /* keep it in its own cache line */
647 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
648 GFP_KERNEL);
649 if (!buffer)
650 return NULL;
651
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030652 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
653 goto fail_free_buffer;
654
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400655 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
656 buffer->flags = flags;
Steven Rostedt37886f62009-03-17 17:22:06 -0400657 buffer->clock = trace_clock_local;
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +0200658 buffer->reader_lock_key = key;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400659
660 /* need at least two pages */
Steven Rostedt5f78abe2009-06-17 14:11:10 -0400661 if (buffer->pages < 2)
662 buffer->pages = 2;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400663
Frederic Weisbecker3bf832c2009-03-19 14:47:33 +0100664 /*
665 * In case of non-hotplug cpu, if the ring-buffer is allocated
666 * in early initcall, it will not be notified of secondary cpus.
667 * In that off case, we need to allocate for all possible cpus.
668 */
669#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -0400670 get_online_cpus();
671 cpumask_copy(buffer->cpumask, cpu_online_mask);
Frederic Weisbecker3bf832c2009-03-19 14:47:33 +0100672#else
673 cpumask_copy(buffer->cpumask, cpu_possible_mask);
674#endif
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400675 buffer->cpus = nr_cpu_ids;
676
677 bsize = sizeof(void *) * nr_cpu_ids;
678 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
679 GFP_KERNEL);
680 if (!buffer->buffers)
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030681 goto fail_free_cpumask;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400682
683 for_each_buffer_cpu(buffer, cpu) {
684 buffer->buffers[cpu] =
685 rb_allocate_cpu_buffer(buffer, cpu);
686 if (!buffer->buffers[cpu])
687 goto fail_free_buffers;
688 }
689
Steven Rostedt59222ef2009-03-12 11:46:03 -0400690#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -0400691 buffer->cpu_notify.notifier_call = rb_cpu_notify;
692 buffer->cpu_notify.priority = 0;
693 register_cpu_notifier(&buffer->cpu_notify);
694#endif
695
696 put_online_cpus();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400697 mutex_init(&buffer->mutex);
698
699 return buffer;
700
701 fail_free_buffers:
702 for_each_buffer_cpu(buffer, cpu) {
703 if (buffer->buffers[cpu])
704 rb_free_cpu_buffer(buffer->buffers[cpu]);
705 }
706 kfree(buffer->buffers);
707
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030708 fail_free_cpumask:
709 free_cpumask_var(buffer->cpumask);
Steven Rostedt554f7862009-03-11 22:00:13 -0400710 put_online_cpus();
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030711
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400712 fail_free_buffer:
713 kfree(buffer);
714 return NULL;
715}
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +0200716EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400717
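/*
 * Illustrative usage sketch (not part of the original file): callers do not
 * normally invoke __ring_buffer_alloc() directly; the ring_buffer_alloc()
 * wrapper in <linux/ring_buffer.h> supplies the lockdep class key. Roughly:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);  (about 1 MB per cpu)
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */
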
718/**
719 * ring_buffer_free - free a ring buffer.
720 * @buffer: the buffer to free.
721 */
722void
723ring_buffer_free(struct ring_buffer *buffer)
724{
725 int cpu;
726
Steven Rostedt554f7862009-03-11 22:00:13 -0400727 get_online_cpus();
728
Steven Rostedt59222ef2009-03-12 11:46:03 -0400729#ifdef CONFIG_HOTPLUG_CPU
Steven Rostedt554f7862009-03-11 22:00:13 -0400730 unregister_cpu_notifier(&buffer->cpu_notify);
731#endif
732
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400733 for_each_buffer_cpu(buffer, cpu)
734 rb_free_cpu_buffer(buffer->buffers[cpu]);
735
Steven Rostedt554f7862009-03-11 22:00:13 -0400736 put_online_cpus();
737
Eric Dumazetbd3f0222009-08-07 12:49:29 +0200738 kfree(buffer->buffers);
Rusty Russell9e01c1b2009-01-01 10:12:22 +1030739 free_cpumask_var(buffer->cpumask);
740
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400741 kfree(buffer);
742}
Robert Richterc4f50182008-12-11 16:49:22 +0100743EXPORT_SYMBOL_GPL(ring_buffer_free);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400744
Steven Rostedt37886f62009-03-17 17:22:06 -0400745void ring_buffer_set_clock(struct ring_buffer *buffer,
746 u64 (*clock)(void))
747{
748 buffer->clock = clock;
749}
750
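/*
 * Illustrative sketch (not part of the original file): a user that wants
 * timestamps comparable across CPUs, rather than the fast local clock set
 * as the default in __ring_buffer_alloc(), could switch clocks right after
 * allocation, e.g. with trace_clock_global() from kernel/trace/trace_clock.c:
 *
 *	ring_buffer_set_clock(rb, trace_clock_global);
 */
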
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400751static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
752
753static void
754rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
755{
Steven Rostedt044fa782008-12-02 23:50:03 -0500756 struct buffer_page *bpage;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400757 struct list_head *p;
758 unsigned i;
759
760 atomic_inc(&cpu_buffer->record_disabled);
761 synchronize_sched();
762
763 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500764 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
765 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400766 p = cpu_buffer->pages.next;
Steven Rostedt044fa782008-12-02 23:50:03 -0500767 bpage = list_entry(p, struct buffer_page, list);
768 list_del_init(&bpage->list);
769 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400770 }
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500771 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
772 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400773
774 rb_reset_cpu(cpu_buffer);
775
776 rb_check_pages(cpu_buffer);
777
778 atomic_dec(&cpu_buffer->record_disabled);
779
780}
781
782static void
783rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
784 struct list_head *pages, unsigned nr_pages)
785{
Steven Rostedt044fa782008-12-02 23:50:03 -0500786 struct buffer_page *bpage;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400787 struct list_head *p;
788 unsigned i;
789
790 atomic_inc(&cpu_buffer->record_disabled);
791 synchronize_sched();
792
793 for (i = 0; i < nr_pages; i++) {
Steven Rostedt3e89c7b2008-11-11 15:28:41 -0500794 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
795 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400796 p = pages->next;
Steven Rostedt044fa782008-12-02 23:50:03 -0500797 bpage = list_entry(p, struct buffer_page, list);
798 list_del_init(&bpage->list);
799 list_add_tail(&bpage->list, &cpu_buffer->pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400800 }
801 rb_reset_cpu(cpu_buffer);
802
803 rb_check_pages(cpu_buffer);
804
805 atomic_dec(&cpu_buffer->record_disabled);
806}
807
808/**
809 * ring_buffer_resize - resize the ring buffer
810 * @buffer: the buffer to resize.
811 * @size: the new size.
812 *
813 * The tracer is responsible for making sure that the buffer is
814 * not being used while changing the size.
815 * Note: We may be able to change the above requirement by using
816 * RCU synchronizations.
817 *
818 * Minimum size is 2 * BUF_PAGE_SIZE.
819 *
820 * Returns -1 on failure.
821 */
822int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
823{
824 struct ring_buffer_per_cpu *cpu_buffer;
825 unsigned nr_pages, rm_pages, new_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500826 struct buffer_page *bpage, *tmp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400827 unsigned long buffer_size;
828 unsigned long addr;
829 LIST_HEAD(pages);
830 int i, cpu;
831
Ingo Molnaree51a1d2008-11-13 14:58:31 +0100832 /*
833 * Always succeed at resizing a non-existent buffer:
834 */
835 if (!buffer)
836 return size;
837
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400838 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
839 size *= BUF_PAGE_SIZE;
840 buffer_size = buffer->pages * BUF_PAGE_SIZE;
841
842 /* we need a minimum of two pages */
843 if (size < BUF_PAGE_SIZE * 2)
844 size = BUF_PAGE_SIZE * 2;
845
846 if (size == buffer_size)
847 return size;
848
849 mutex_lock(&buffer->mutex);
Steven Rostedt554f7862009-03-11 22:00:13 -0400850 get_online_cpus();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400851
852 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
853
854 if (size < buffer_size) {
855
856 /* easy case, just free pages */
Steven Rostedt554f7862009-03-11 22:00:13 -0400857 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
858 goto out_fail;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400859
860 rm_pages = buffer->pages - nr_pages;
861
862 for_each_buffer_cpu(buffer, cpu) {
863 cpu_buffer = buffer->buffers[cpu];
864 rb_remove_pages(cpu_buffer, rm_pages);
865 }
866 goto out;
867 }
868
	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM.
	 */
Steven Rostedt554f7862009-03-11 22:00:13 -0400877 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
878 goto out_fail;
Steven Rostedtf536aaf2008-11-10 23:07:30 -0500879
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400880 new_pages = nr_pages - buffer->pages;
881
882 for_each_buffer_cpu(buffer, cpu) {
883 for (i = 0; i < new_pages; i++) {
Steven Rostedt044fa782008-12-02 23:50:03 -0500884 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400885 cache_line_size()),
886 GFP_KERNEL, cpu_to_node(cpu));
Steven Rostedt044fa782008-12-02 23:50:03 -0500887 if (!bpage)
Steven Rostedte4c2ce82008-10-01 11:14:54 -0400888 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500889 list_add(&bpage->list, &pages);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400890 addr = __get_free_page(GFP_KERNEL);
891 if (!addr)
892 goto free_pages;
Steven Rostedt044fa782008-12-02 23:50:03 -0500893 bpage->page = (void *)addr;
894 rb_init_page(bpage->page);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400895 }
896 }
897
898 for_each_buffer_cpu(buffer, cpu) {
899 cpu_buffer = buffer->buffers[cpu];
900 rb_insert_pages(cpu_buffer, &pages, new_pages);
901 }
902
Steven Rostedt554f7862009-03-11 22:00:13 -0400903 if (RB_WARN_ON(buffer, !list_empty(&pages)))
904 goto out_fail;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400905
906 out:
907 buffer->pages = nr_pages;
Steven Rostedt554f7862009-03-11 22:00:13 -0400908 put_online_cpus();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400909 mutex_unlock(&buffer->mutex);
910
911 return size;
912
913 free_pages:
Steven Rostedt044fa782008-12-02 23:50:03 -0500914 list_for_each_entry_safe(bpage, tmp, &pages, list) {
915 list_del_init(&bpage->list);
916 free_buffer_page(bpage);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400917 }
Steven Rostedt554f7862009-03-11 22:00:13 -0400918 put_online_cpus();
Vegard Nossum641d2f62008-11-18 19:22:13 +0100919 mutex_unlock(&buffer->mutex);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400920 return -ENOMEM;
Steven Rostedt554f7862009-03-11 22:00:13 -0400921
922 /*
923 * Something went totally wrong, and we are too paranoid
924 * to even clean up the mess.
925 */
926 out_fail:
927 put_online_cpus();
928 mutex_unlock(&buffer->mutex);
929 return -1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400930}
Robert Richterc4f50182008-12-11 16:49:22 +0100931EXPORT_SYMBOL_GPL(ring_buffer_resize);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400932
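/*
 * Illustrative sketch (not part of the original file): the requested size is
 * rounded up to whole buffer pages, so the value returned on success can be
 * larger than what was asked for:
 *
 *	ret = ring_buffer_resize(buffer, 100 * 1024);
 *	if (ret < 0)
 *		return ret;
 *
 * With 4 KiB pages (BUF_PAGE_SIZE == 4080 on a 64-bit build), 100 KiB rounds
 * up to DIV_ROUND_UP(102400, 4080) = 26 pages, i.e. 106080 bytes per cpu.
 */
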
Steven Rostedt8789a9e2008-12-02 15:34:07 -0500933static inline void *
Steven Rostedt044fa782008-12-02 23:50:03 -0500934__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
Steven Rostedt8789a9e2008-12-02 15:34:07 -0500935{
Steven Rostedt044fa782008-12-02 23:50:03 -0500936 return bpage->data + index;
Steven Rostedt8789a9e2008-12-02 15:34:07 -0500937}
938
Steven Rostedt044fa782008-12-02 23:50:03 -0500939static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400940{
Steven Rostedt044fa782008-12-02 23:50:03 -0500941 return bpage->page->data + index;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400942}
943
944static inline struct ring_buffer_event *
Steven Rostedtd7690412008-10-01 00:29:53 -0400945rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400946{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400947 return __rb_page_index(cpu_buffer->reader_page,
948 cpu_buffer->reader_page->read);
949}
950
951static inline struct ring_buffer_event *
952rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
953{
954 return __rb_page_index(cpu_buffer->head_page,
955 cpu_buffer->head_page->read);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400956}
957
958static inline struct ring_buffer_event *
959rb_iter_head_event(struct ring_buffer_iter *iter)
960{
Steven Rostedt6f807ac2008-10-04 02:00:58 -0400961 return __rb_page_index(iter->head_page, iter->head);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400962}
963
Steven Rostedtbf41a152008-10-04 02:00:59 -0400964static inline unsigned rb_page_write(struct buffer_page *bpage)
965{
966 return local_read(&bpage->write);
967}
968
969static inline unsigned rb_page_commit(struct buffer_page *bpage)
970{
Steven Rostedtabc9b562008-12-02 15:34:06 -0500971 return local_read(&bpage->page->commit);
Steven Rostedtbf41a152008-10-04 02:00:59 -0400972}
973
/* Size is determined by what has been committed */
975static inline unsigned rb_page_size(struct buffer_page *bpage)
976{
977 return rb_page_commit(bpage);
978}
979
980static inline unsigned
981rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
982{
983 return rb_page_commit(cpu_buffer->commit_page);
984}
985
986static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
987{
988 return rb_page_commit(cpu_buffer->head_page);
989}
990
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400991static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt044fa782008-12-02 23:50:03 -0500992 struct buffer_page **bpage)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400993{
Steven Rostedt044fa782008-12-02 23:50:03 -0500994 struct list_head *p = (*bpage)->list.next;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400995
996 if (p == &cpu_buffer->pages)
997 p = p->next;
998
Steven Rostedt044fa782008-12-02 23:50:03 -0500999 *bpage = list_entry(p, struct buffer_page, list);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001000}
1001
Steven Rostedtbf41a152008-10-04 02:00:59 -04001002static inline unsigned
1003rb_event_index(struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001004{
Steven Rostedtbf41a152008-10-04 02:00:59 -04001005 unsigned long addr = (unsigned long)event;
1006
Steven Rostedt22f470f2009-06-11 09:29:58 -04001007 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001008}
1009
Steven Rostedt0f0c85f2009-05-11 16:08:00 -04001010static inline int
Steven Rostedtfa743952009-06-16 12:37:57 -04001011rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1012 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001013{
Steven Rostedtbf41a152008-10-04 02:00:59 -04001014 unsigned long addr = (unsigned long)event;
1015 unsigned long index;
1016
1017 index = rb_event_index(event);
1018 addr &= PAGE_MASK;
1019
1020 return cpu_buffer->commit_page->page == (void *)addr &&
1021 rb_commit_index(cpu_buffer) == index;
1022}
1023
Andrew Morton34a148b2009-01-09 12:27:09 -08001024static void
Steven Rostedtbf41a152008-10-04 02:00:59 -04001025rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1026{
1027 /*
1028 * We only race with interrupts and NMIs on this CPU.
1029 * If we own the commit event, then we can commit
1030 * all others that interrupted us, since the interruptions
1031 * are in stack format (they finish before they come
1032 * back to us). This allows us to do a simple loop to
1033 * assign the commit to the tail.
1034 */
Steven Rostedta8ccf1d2008-12-23 11:32:24 -05001035 again:
Steven Rostedtbf41a152008-10-04 02:00:59 -04001036 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
Steven Rostedtabc9b562008-12-02 15:34:06 -05001037 cpu_buffer->commit_page->page->commit =
Steven Rostedtbf41a152008-10-04 02:00:59 -04001038 cpu_buffer->commit_page->write;
1039 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001040 cpu_buffer->write_stamp =
1041 cpu_buffer->commit_page->page->time_stamp;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001042 /* add barrier to keep gcc from optimizing too much */
1043 barrier();
1044 }
1045 while (rb_commit_index(cpu_buffer) !=
1046 rb_page_write(cpu_buffer->commit_page)) {
Steven Rostedtabc9b562008-12-02 15:34:06 -05001047 cpu_buffer->commit_page->page->commit =
Steven Rostedtbf41a152008-10-04 02:00:59 -04001048 cpu_buffer->commit_page->write;
1049 barrier();
1050 }
Steven Rostedta8ccf1d2008-12-23 11:32:24 -05001051
1052 /* again, keep gcc from optimizing */
1053 barrier();
1054
1055 /*
1056 * If an interrupt came in just after the first while loop
1057 * and pushed the tail page forward, we will be left with
1058 * a dangling commit that will never go forward.
1059 */
1060 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1061 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001062}
1063
Steven Rostedtd7690412008-10-01 00:29:53 -04001064static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001065{
Steven Rostedtabc9b562008-12-02 15:34:06 -05001066 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001067 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001068}
1069
Andrew Morton34a148b2009-01-09 12:27:09 -08001070static void rb_inc_iter(struct ring_buffer_iter *iter)
Steven Rostedtd7690412008-10-01 00:29:53 -04001071{
1072 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1073
1074 /*
1075 * The iterator could be on the reader page (it starts there).
1076 * But the head could have moved, since the reader was
1077 * found. Check for this case and assign the iterator
1078 * to the head page instead of next.
1079 */
1080 if (iter->head_page == cpu_buffer->reader_page)
1081 iter->head_page = cpu_buffer->head_page;
1082 else
1083 rb_inc_page(cpu_buffer, &iter->head_page);
1084
Steven Rostedtabc9b562008-12-02 15:34:06 -05001085 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001086 iter->head = 0;
1087}
1088
1089/**
1090 * ring_buffer_update_event - update event type and data
 * @event: the event to update
1092 * @type: the type of event
1093 * @length: the size of the event field in the ring buffer
1094 *
1095 * Update the type and data fields of the event. The length
1096 * is the actual size that is written to the ring buffer,
1097 * and with this, we can determine what to place into the
1098 * data field.
1099 */
Andrew Morton34a148b2009-01-09 12:27:09 -08001100static void
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001101rb_update_event(struct ring_buffer_event *event,
1102 unsigned type, unsigned length)
1103{
Lai Jiangshan334d4162009-04-24 11:27:05 +08001104 event->type_len = type;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001105
1106 switch (type) {
1107
1108 case RINGBUF_TYPE_PADDING:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001109 case RINGBUF_TYPE_TIME_EXTEND:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001110 case RINGBUF_TYPE_TIME_STAMP:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001111 break;
1112
Lai Jiangshan334d4162009-04-24 11:27:05 +08001113 case 0:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001114 length -= RB_EVNT_HDR_SIZE;
Lai Jiangshan334d4162009-04-24 11:27:05 +08001115 if (length > RB_MAX_SMALL_DATA)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001116 event->array[0] = length;
Lai Jiangshan334d4162009-04-24 11:27:05 +08001117 else
1118 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001119 break;
1120 default:
1121 BUG();
1122 }
1123}
1124
Andrew Morton34a148b2009-01-09 12:27:09 -08001125static unsigned rb_calculate_event_length(unsigned length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001126{
1127 struct ring_buffer_event event; /* Used only for sizeof array */
1128
1129 /* zero length can cause confusions */
1130 if (!length)
1131 length = 1;
1132
1133 if (length > RB_MAX_SMALL_DATA)
1134 length += sizeof(event.array[0]);
1135
1136 length += RB_EVNT_HDR_SIZE;
1137 length = ALIGN(length, RB_ALIGNMENT);
1138
1139 return length;
1140}
1141
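/*
 * Worked example (illustrative): for a 10 byte payload,
 * rb_calculate_event_length() returns ALIGN(10 + RB_EVNT_HDR_SIZE, 4) =
 * ALIGN(14, 4) = 16 bytes to reserve. Since 10 is well under
 * RB_MAX_SMALL_DATA (112 bytes with RB_ALIGNMENT == 4), rb_update_event()
 * will later encode it inline as type_len = (16 - 4) / 4 = 3, and
 * ring_buffer_event_length() will report 12 (the payload rounded up to the
 * 4 byte alignment).
 */
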
Steven Rostedtc7b09302009-06-11 11:12:00 -04001142static inline void
1143rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1144 struct buffer_page *tail_page,
1145 unsigned long tail, unsigned long length)
1146{
1147 struct ring_buffer_event *event;
1148
1149 /*
1150 * Only the event that crossed the page boundary
1151 * must fill the old tail_page with padding.
1152 */
1153 if (tail >= BUF_PAGE_SIZE) {
1154 local_sub(length, &tail_page->write);
1155 return;
1156 }
1157
1158 event = __rb_page_index(tail_page, tail);
Linus Torvaldsb0b70652009-06-20 10:56:46 -07001159 kmemcheck_annotate_bitfield(event, bitfield);
Steven Rostedtc7b09302009-06-11 11:12:00 -04001160
1161 /*
1162 * If this event is bigger than the minimum size, then
1163 * we need to be careful that we don't subtract the
1164 * write counter enough to allow another writer to slip
1165 * in on this page.
1166 * We put in a discarded commit instead, to make sure
1167 * that this space is not used again.
1168 *
1169 * If we are less than the minimum size, we don't need to
1170 * worry about it.
1171 */
1172 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1173 /* No room for any events */
1174
1175 /* Mark the rest of the page with padding */
1176 rb_event_set_padding(event);
1177
1178 /* Set the write back to the previous setting */
1179 local_sub(length, &tail_page->write);
1180 return;
1181 }
1182
1183 /* Put in a discarded event */
1184 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1185 event->type_len = RINGBUF_TYPE_PADDING;
1186 /* time delta must be non zero */
1187 event->time_delta = 1;
1188 /* Account for this as an entry */
1189 local_inc(&tail_page->entries);
1190 local_inc(&cpu_buffer->entries);
1191
1192 /* Set write to end of buffer */
1193 length = (tail + length) - BUF_PAGE_SIZE;
1194 local_sub(length, &tail_page->write);
1195}
Steven Rostedt6634ff22009-05-06 15:30:07 -04001196
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001197static struct ring_buffer_event *
Steven Rostedt6634ff22009-05-06 15:30:07 -04001198rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1199 unsigned long length, unsigned long tail,
1200 struct buffer_page *commit_page,
1201 struct buffer_page *tail_page, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001202{
Steven Rostedt6634ff22009-05-06 15:30:07 -04001203 struct buffer_page *next_page, *head_page, *reader_page;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001204 struct ring_buffer *buffer = cpu_buffer->buffer;
Steven Rostedt78d904b2009-02-05 18:43:07 -05001205 bool lock_taken = false;
Steven Rostedt6634ff22009-05-06 15:30:07 -04001206 unsigned long flags;
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001207
1208 next_page = tail_page;
1209
1210 local_irq_save(flags);
1211 /*
1212 * Since the write to the buffer is still not
1213 * fully lockless, we must be careful with NMIs.
1214 * The locks in the writers are taken when a write
1215 * crosses to a new page. The locks protect against
1216 * races with the readers (this will soon be fixed
1217 * with a lockless solution).
1218 *
1219 * Because we can not protect against NMIs, and we
1220 * want to keep traces reentrant, we need to manage
1221 * what happens when we are in an NMI.
1222 *
1223 * NMIs can happen after we take the lock.
1224 * If we are in an NMI, only take the lock
1225 * if it is not already taken. Otherwise
1226 * simply fail.
1227 */
1228 if (unlikely(in_nmi())) {
1229 if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1230 cpu_buffer->nmi_dropped++;
1231 goto out_reset;
1232 }
1233 } else
1234 __raw_spin_lock(&cpu_buffer->lock);
1235
1236 lock_taken = true;
1237
1238 rb_inc_page(cpu_buffer, &next_page);
1239
1240 head_page = cpu_buffer->head_page;
1241 reader_page = cpu_buffer->reader_page;
1242
1243 /* we grabbed the lock before incrementing */
1244 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1245 goto out_reset;
1246
1247 /*
1248 * If for some reason, we had an interrupt storm that made
1249 * it all the way around the buffer, bail, and warn
1250 * about it.
1251 */
1252 if (unlikely(next_page == commit_page)) {
1253 cpu_buffer->commit_overrun++;
1254 goto out_reset;
1255 }
1256
1257 if (next_page == head_page) {
1258 if (!(buffer->flags & RB_FL_OVERWRITE))
1259 goto out_reset;
1260
1261 /* tail_page has not moved yet? */
1262 if (tail_page == cpu_buffer->tail_page) {
1263 /* count overflows */
1264 cpu_buffer->overrun +=
1265 local_read(&head_page->entries);
1266
1267 rb_inc_page(cpu_buffer, &head_page);
1268 cpu_buffer->head_page = head_page;
1269 cpu_buffer->head_page->read = 0;
1270 }
1271 }
1272
1273 /*
1274 * If the tail page is still the same as what we think
1275 * it is, then it is up to us to update the tail
1276 * pointer.
1277 */
1278 if (tail_page == cpu_buffer->tail_page) {
1279 local_set(&next_page->write, 0);
1280 local_set(&next_page->entries, 0);
1281 local_set(&next_page->page->commit, 0);
1282 cpu_buffer->tail_page = next_page;
1283
1284 /* reread the time stamp */
Steven Rostedt88eb0122009-05-11 16:28:23 -04001285 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001286 cpu_buffer->tail_page->page->time_stamp = *ts;
1287 }
1288
Steven Rostedtc7b09302009-06-11 11:12:00 -04001289 rb_reset_tail(cpu_buffer, tail_page, tail, length);
Steven Rostedtaa20ae82009-05-05 21:16:11 -04001290
1291 __raw_spin_unlock(&cpu_buffer->lock);
1292 local_irq_restore(flags);
1293
1294 /* fail and let the caller try again */
1295 return ERR_PTR(-EAGAIN);
1296
Steven Rostedt45141d42009-02-12 13:19:48 -05001297 out_reset:
Lai Jiangshan6f3b3442009-01-12 11:06:18 +08001298 /* reset write */
Steven Rostedtc7b09302009-06-11 11:12:00 -04001299 rb_reset_tail(cpu_buffer, tail_page, tail, length);
Lai Jiangshan6f3b3442009-01-12 11:06:18 +08001300
Steven Rostedt78d904b2009-02-05 18:43:07 -05001301 if (likely(lock_taken))
1302 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001303 local_irq_restore(flags);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001304 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001305}
1306
Steven Rostedt6634ff22009-05-06 15:30:07 -04001307static struct ring_buffer_event *
1308__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1309 unsigned type, unsigned long length, u64 *ts)
1310{
1311 struct buffer_page *tail_page, *commit_page;
1312 struct ring_buffer_event *event;
1313 unsigned long tail, write;
1314
1315 commit_page = cpu_buffer->commit_page;
1316 /* we just need to protect against interrupts */
1317 barrier();
1318 tail_page = cpu_buffer->tail_page;
1319 write = local_add_return(length, &tail_page->write);
1320 tail = write - length;
1321
	/* See if we shot past the end of this buffer page */
1323 if (write > BUF_PAGE_SIZE)
1324 return rb_move_tail(cpu_buffer, length, tail,
1325 commit_page, tail_page, ts);
1326
1327 /* We reserved something on the buffer */
1328
Steven Rostedt6634ff22009-05-06 15:30:07 -04001329 event = __rb_page_index(tail_page, tail);
Vegard Nossum1744a212009-02-28 08:29:44 +01001330 kmemcheck_annotate_bitfield(event, bitfield);
Steven Rostedt6634ff22009-05-06 15:30:07 -04001331 rb_update_event(event, type, length);
1332
1333 /* The passed in type is zero for DATA */
1334 if (likely(!type))
1335 local_inc(&tail_page->entries);
1336
1337 /*
Steven Rostedtfa743952009-06-16 12:37:57 -04001338 * If this is the first commit on the page, then update
1339 * its timestamp.
Steven Rostedt6634ff22009-05-06 15:30:07 -04001340 */
Steven Rostedtfa743952009-06-16 12:37:57 -04001341 if (!tail)
1342 tail_page->page->time_stamp = *ts;
Steven Rostedt6634ff22009-05-06 15:30:07 -04001343
1344 return event;
1345}
1346
Steven Rostedtedd813bf2009-06-02 23:00:53 -04001347static inline int
1348rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1349 struct ring_buffer_event *event)
1350{
1351 unsigned long new_index, old_index;
1352 struct buffer_page *bpage;
1353 unsigned long index;
1354 unsigned long addr;
1355
1356 new_index = rb_event_index(event);
1357 old_index = new_index + rb_event_length(event);
1358 addr = (unsigned long)event;
1359 addr &= PAGE_MASK;
1360
1361 bpage = cpu_buffer->tail_page;
1362
1363 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1364 /*
1365 * This is on the tail page. It is possible that
1366 * a write could come in and move the tail page
1367 * and write to the next page. That is fine
1368 * because we just shorten what is on this page.
1369 */
1370 index = local_cmpxchg(&bpage->write, old_index, new_index);
1371 if (index == old_index)
1372 return 1;
1373 }
1374
1375 /* could not discard */
1376 return 0;
1377}
1378
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001379static int
1380rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1381 u64 *ts, u64 *delta)
1382{
1383 struct ring_buffer_event *event;
1384 static int once;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001385 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001386
1387 if (unlikely(*delta > (1ULL << 59) && !once++)) {
1388 printk(KERN_WARNING "Delta way too big! %llu"
1389 " ts=%llu write stamp = %llu\n",
Stephen Rothwelle2862c92008-10-27 17:43:28 +11001390 (unsigned long long)*delta,
1391 (unsigned long long)*ts,
1392 (unsigned long long)cpu_buffer->write_stamp);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001393 WARN_ON(1);
1394 }
1395
1396 /*
1397	 * The delta is too big, we need to add a
1398 * new timestamp.
1399 */
1400 event = __rb_reserve_next(cpu_buffer,
1401 RINGBUF_TYPE_TIME_EXTEND,
1402 RB_LEN_TIME_EXTEND,
1403 ts);
1404 if (!event)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001405 return -EBUSY;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001406
Steven Rostedtbf41a152008-10-04 02:00:59 -04001407 if (PTR_ERR(event) == -EAGAIN)
1408 return -EAGAIN;
1409
1410	/* Only a committed time event can update the write stamp */
Steven Rostedtfa743952009-06-16 12:37:57 -04001411 if (rb_event_is_commit(cpu_buffer, event)) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04001412 /*
Steven Rostedtfa743952009-06-16 12:37:57 -04001413		 * If this is the first event on the page, then it was
1414		 * updated with the page itself. Try to discard it,
1415		 * and if we can't, just make it zero.
Steven Rostedtbf41a152008-10-04 02:00:59 -04001416 */
1417 if (rb_event_index(event)) {
1418 event->time_delta = *delta & TS_MASK;
1419 event->array[0] = *delta >> TS_SHIFT;
1420 } else {
Steven Rostedtea05b572009-06-03 09:30:10 -04001421 /* try to discard, since we do not need this */
1422 if (!rb_try_to_discard(cpu_buffer, event)) {
1423 /* nope, just zero it */
1424 event->time_delta = 0;
1425 event->array[0] = 0;
1426 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001427 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001428 cpu_buffer->write_stamp = *ts;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001429 /* let the caller know this was the commit */
1430 ret = 1;
1431 } else {
Steven Rostedtedd813bf2009-06-02 23:00:53 -04001432 /* Try to discard the event */
1433 if (!rb_try_to_discard(cpu_buffer, event)) {
1434 /* Darn, this is just wasted space */
1435 event->time_delta = 0;
1436 event->array[0] = 0;
Steven Rostedtedd813bf2009-06-02 23:00:53 -04001437 }
Steven Rostedtf57a8a12009-06-05 14:11:30 -04001438 ret = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001439 }
1440
Steven Rostedtbf41a152008-10-04 02:00:59 -04001441 *delta = 0;
1442
1443 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001444}
1445
Steven Rostedtfa743952009-06-16 12:37:57 -04001446static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
1447{
1448 local_inc(&cpu_buffer->committing);
1449 local_inc(&cpu_buffer->commits);
1450}
1451
1452static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
1453{
1454 unsigned long commits;
1455
1456 if (RB_WARN_ON(cpu_buffer,
1457 !local_read(&cpu_buffer->committing)))
1458 return;
1459
1460 again:
1461 commits = local_read(&cpu_buffer->commits);
1462 /* synchronize with interrupts */
1463 barrier();
1464 if (local_read(&cpu_buffer->committing) == 1)
1465 rb_set_commit_to_write(cpu_buffer);
1466
1467 local_dec(&cpu_buffer->committing);
1468
1469 /* synchronize with interrupts */
1470 barrier();
1471
1472 /*
1473 * Need to account for interrupts coming in between the
1474 * updating of the commit page and the clearing of the
1475 * committing counter.
1476 */
1477 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
1478 !local_read(&cpu_buffer->committing)) {
1479 local_inc(&cpu_buffer->committing);
1480 goto again;
1481 }
1482}
1483
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001484static struct ring_buffer_event *
1485rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001486 unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001487{
1488 struct ring_buffer_event *event;
Steven Rostedt168b6b12009-05-11 22:11:05 -04001489 u64 ts, delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001490 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001491 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001492
Steven Rostedtfa743952009-06-16 12:37:57 -04001493 rb_start_commit(cpu_buffer);
1494
Steven Rostedtbe957c42009-05-11 14:42:53 -04001495 length = rb_calculate_event_length(length);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001496 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001497 /*
1498 * We allow for interrupts to reenter here and do a trace.
1499 * If one does, it will cause this original code to loop
1500 * back here. Even with heavy interrupts happening, this
1501 * should only happen a few times in a row. If this happens
1502 * 1000 times in a row, there must be either an interrupt
1503 * storm or we have something buggy.
1504 * Bail!
1505 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05001506 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedtfa743952009-06-16 12:37:57 -04001507 goto out_fail;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001508
Steven Rostedt88eb0122009-05-11 16:28:23 -04001509 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001510
Steven Rostedtbf41a152008-10-04 02:00:59 -04001511 /*
1512 * Only the first commit can update the timestamp.
1513 * Yes there is a race here. If an interrupt comes in
1514 * just after the conditional and it traces too, then it
1515 * will also check the deltas. More than one timestamp may
1516 * also be made. But only the entry that did the actual
1517 * commit will be something other than zero.
1518 */
Steven Rostedt0f0c85f2009-05-11 16:08:00 -04001519 if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1520 rb_page_write(cpu_buffer->tail_page) ==
1521 rb_commit_index(cpu_buffer))) {
Steven Rostedt168b6b12009-05-11 22:11:05 -04001522 u64 diff;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001523
Steven Rostedt168b6b12009-05-11 22:11:05 -04001524 diff = ts - cpu_buffer->write_stamp;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001525
Steven Rostedt168b6b12009-05-11 22:11:05 -04001526 /* make sure this diff is calculated here */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001527 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001528
Steven Rostedtbf41a152008-10-04 02:00:59 -04001529 /* Did the write stamp get updated already? */
1530 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt168b6b12009-05-11 22:11:05 -04001531 goto get_event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001532
Steven Rostedt168b6b12009-05-11 22:11:05 -04001533 delta = diff;
1534 if (unlikely(test_time_stamp(delta))) {
Steven Rostedtbf41a152008-10-04 02:00:59 -04001535
1536 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001537 if (commit == -EBUSY)
Steven Rostedtfa743952009-06-16 12:37:57 -04001538 goto out_fail;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001539
1540 if (commit == -EAGAIN)
1541 goto again;
1542
1543 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001544 }
Steven Rostedt168b6b12009-05-11 22:11:05 -04001545 }
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001546
Steven Rostedt168b6b12009-05-11 22:11:05 -04001547 get_event:
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001548 event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
Steven Rostedt168b6b12009-05-11 22:11:05 -04001549 if (unlikely(PTR_ERR(event) == -EAGAIN))
Steven Rostedtbf41a152008-10-04 02:00:59 -04001550 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001551
Steven Rostedtfa743952009-06-16 12:37:57 -04001552 if (!event)
1553 goto out_fail;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001554
Steven Rostedtfa743952009-06-16 12:37:57 -04001555 if (!rb_event_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001556 delta = 0;
1557
1558 event->time_delta = delta;
1559
1560 return event;
Steven Rostedtfa743952009-06-16 12:37:57 -04001561
1562 out_fail:
1563 rb_end_commit(cpu_buffer);
1564 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001565}
1566
Paul Mundt1155de42009-06-25 14:30:12 +09001567#ifdef CONFIG_TRACING
1568
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001569#define TRACE_RECURSIVE_DEPTH 16
Steven Rostedt261842b2009-04-16 21:41:52 -04001570
1571static int trace_recursive_lock(void)
1572{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001573 current->trace_recursion++;
Steven Rostedt261842b2009-04-16 21:41:52 -04001574
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001575 if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1576 return 0;
Steven Rostedt261842b2009-04-16 21:41:52 -04001577
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001578 /* Disable all tracing before we do anything else */
1579 tracing_off_permanent();
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02001580
Steven Rostedt7d7d2b82009-04-27 12:37:49 -04001581 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001582 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1583 current->trace_recursion,
1584 hardirq_count() >> HARDIRQ_SHIFT,
1585 softirq_count() >> SOFTIRQ_SHIFT,
1586 in_nmi());
Frederic Weisbeckere057a5e2009-04-19 23:38:12 +02001587
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001588 WARN_ON_ONCE(1);
1589 return -1;
Steven Rostedt261842b2009-04-16 21:41:52 -04001590}
1591
1592static void trace_recursive_unlock(void)
1593{
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001594 WARN_ON_ONCE(!current->trace_recursion);
Steven Rostedt261842b2009-04-16 21:41:52 -04001595
Steven Rostedtaa18efb2009-04-20 16:16:11 -04001596 current->trace_recursion--;
Steven Rostedt261842b2009-04-16 21:41:52 -04001597}
1598
Paul Mundt1155de42009-06-25 14:30:12 +09001599#else
1600
1601#define trace_recursive_lock() (0)
1602#define trace_recursive_unlock() do { } while (0)
1603
1604#endif
1605
Steven Rostedtbf41a152008-10-04 02:00:59 -04001606static DEFINE_PER_CPU(int, rb_need_resched);
1607
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001608/**
1609 * ring_buffer_lock_reserve - reserve a part of the buffer
1610 * @buffer: the ring buffer to reserve from
1611 * @length: the length of the data to reserve (excluding event header)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001612 *
1613 * Returns a reserved event on the ring buffer to copy directly to.
1614 * The user of this interface will need to get the body to write into
1615 * and can use the ring_buffer_event_data() interface.
1616 *
1617 * The length is the length of the data needed, not the event length
1618 * which also includes the event header.
1619 *
1620 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1621 * If NULL is returned, then nothing has been allocated or locked.
1622 */
1623struct ring_buffer_event *
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001624ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001625{
1626 struct ring_buffer_per_cpu *cpu_buffer;
1627 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001628 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001629
Steven Rostedt033601a2008-11-21 12:41:55 -05001630 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001631 return NULL;
1632
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001633 if (atomic_read(&buffer->record_disabled))
1634 return NULL;
1635
Steven Rostedtbf41a152008-10-04 02:00:59 -04001636 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001637 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001638
Steven Rostedt261842b2009-04-16 21:41:52 -04001639 if (trace_recursive_lock())
1640 goto out_nocheck;
1641
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001642 cpu = raw_smp_processor_id();
1643
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301644 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001645 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001646
1647 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001648
1649 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001650 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001651
Steven Rostedtbe957c42009-05-11 14:42:53 -04001652 if (length > BUF_MAX_DATA_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001653 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001654
Steven Rostedt1cd8d732009-05-11 14:08:09 -04001655 event = rb_reserve_next_event(cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001656 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001657 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001658
Steven Rostedtbf41a152008-10-04 02:00:59 -04001659 /*
1660 * Need to store resched state on this cpu.
1661 * Only the first needs to.
1662 */
1663
1664 if (preempt_count() == 1)
1665 per_cpu(rb_need_resched, cpu) = resched;
1666
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001667 return event;
1668
Steven Rostedtd7690412008-10-01 00:29:53 -04001669 out:
Steven Rostedt261842b2009-04-16 21:41:52 -04001670 trace_recursive_unlock();
1671
1672 out_nocheck:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001673 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001674 return NULL;
1675}
Robert Richterc4f50182008-12-11 16:49:22 +01001676EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
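
/*
 * Example: the reserve/fill/commit pattern described above, as a minimal
 * sketch.  The helper name and its u64 payload are hypothetical; only the
 * ring_buffer_lock_reserve(), ring_buffer_event_data() and
 * ring_buffer_unlock_commit() calls illustrate the real interface.
 */
static inline int example_record_u64(struct ring_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	/* reserve room for the payload only; the event header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(val));
	if (!event)
		return -EBUSY;

	/* fill in the payload */
	body = ring_buffer_event_data(event);
	*body = val;

	/* releases the preemption/recursion state taken by the reserve */
	return ring_buffer_unlock_commit(buffer, event);
}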
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001677
1678static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1679 struct ring_buffer_event *event)
1680{
Steven Rostedte4906ef2009-04-30 20:49:44 -04001681 local_inc(&cpu_buffer->entries);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001682
Steven Rostedtfa743952009-06-16 12:37:57 -04001683 /*
1684	 * The first event in the commit queue updates the
1685 * time stamp.
1686 */
1687 if (rb_event_is_commit(cpu_buffer, event))
1688 cpu_buffer->write_stamp += event->time_delta;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001689
Steven Rostedtfa743952009-06-16 12:37:57 -04001690 rb_end_commit(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001691}
1692
1693/**
1694 * ring_buffer_unlock_commit - commit a reserved event
1695 * @buffer: The buffer to commit to
1696 * @event: The event pointer to commit.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001697 *
1698 * This commits the data to the ring buffer, and releases any locks held.
1699 *
1700 * Must be paired with ring_buffer_lock_reserve.
1701 */
1702int ring_buffer_unlock_commit(struct ring_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001703 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001704{
1705 struct ring_buffer_per_cpu *cpu_buffer;
1706 int cpu = raw_smp_processor_id();
1707
1708 cpu_buffer = buffer->buffers[cpu];
1709
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001710 rb_commit(cpu_buffer, event);
1711
Steven Rostedt261842b2009-04-16 21:41:52 -04001712 trace_recursive_unlock();
1713
Steven Rostedtbf41a152008-10-04 02:00:59 -04001714 /*
1715 * Only the last preempt count needs to restore preemption.
1716 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001717 if (preempt_count() == 1)
1718 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1719 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04001720 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001721
1722 return 0;
1723}
Robert Richterc4f50182008-12-11 16:49:22 +01001724EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001725
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001726static inline void rb_event_discard(struct ring_buffer_event *event)
1727{
Lai Jiangshan334d4162009-04-24 11:27:05 +08001728 /* array[0] holds the actual length for the discarded event */
1729 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1730 event->type_len = RINGBUF_TYPE_PADDING;
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001731 /* time delta must be non zero */
1732 if (!event->time_delta)
1733 event->time_delta = 1;
1734}
1735
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001736/**
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001737 * ring_buffer_event_discard - discard any event in the ring buffer
1738 * @event: the event to discard
1739 *
1740 * Sometimes an event that is in the ring buffer needs to be ignored.
1741 * This function lets the user discard an event in the ring buffer
1742 * and then that event will not be read later.
1743 *
1744 * Note, it is up to the user to be careful with this, and protect
1745 * against races. If the user discards an event that has been consumed
1746 * it is possible that it could corrupt the ring buffer.
1747 */
1748void ring_buffer_event_discard(struct ring_buffer_event *event)
1749{
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001750 rb_event_discard(event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001751}
1752EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1753
1754/**
1755 * ring_buffer_commit_discard - discard an event that has not been committed
1756 * @buffer: the ring buffer
1757 * @event: non committed event to discard
1758 *
1759 * This is similar to ring_buffer_event_discard but must only be
1760 * performed on an event that has not been committed yet. The difference
1761 * is that this will also try to free the event from the ring buffer
1762 * if another event has not been added behind it.
1763 *
1764 * If another event has been added behind it, it will set the event
1765 * up as discarded, and perform the commit.
1766 *
1767 * If this function is called, do not call ring_buffer_unlock_commit on
1768 * the event.
1769 */
1770void ring_buffer_discard_commit(struct ring_buffer *buffer,
1771 struct ring_buffer_event *event)
1772{
1773 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001774 int cpu;
1775
1776 /* The event is discarded regardless */
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001777 rb_event_discard(event);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001778
Steven Rostedtfa743952009-06-16 12:37:57 -04001779 cpu = smp_processor_id();
1780 cpu_buffer = buffer->buffers[cpu];
1781
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001782 /*
1783 * This must only be called if the event has not been
1784 * committed yet. Thus we can assume that preemption
1785 * is still disabled.
1786 */
Steven Rostedtfa743952009-06-16 12:37:57 -04001787 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001788
Steven Rostedt0f2541d2009-08-05 12:02:48 -04001789 if (rb_try_to_discard(cpu_buffer, event))
Steven Rostedtedd813bf2009-06-02 23:00:53 -04001790 goto out;
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001791
1792 /*
1793	 * The commit is still visible to the reader, so we
1794 * must increment entries.
1795 */
Steven Rostedte4906ef2009-04-30 20:49:44 -04001796 local_inc(&cpu_buffer->entries);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001797 out:
Steven Rostedtfa743952009-06-16 12:37:57 -04001798 rb_end_commit(cpu_buffer);
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001799
Frederic Weisbeckerf3b9aae2009-04-19 23:39:33 +02001800 trace_recursive_unlock();
1801
Steven Rostedtfa1b47d2009-04-02 00:09:41 -04001802 /*
1803 * Only the last preempt count needs to restore preemption.
1804 */
1805 if (preempt_count() == 1)
1806 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1807 else
1808 preempt_enable_no_resched_notrace();
1809
1810}
1811EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
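
/*
 * Example: reserving an event and then dropping it with
 * ring_buffer_discard_commit() instead of committing it.  A hedged sketch;
 * the helper and its "interesting" flag are hypothetical, the
 * reserve-then-discard pairing is what is being illustrated.
 */
static inline void example_record_if_interesting(struct ring_buffer *buffer,
						 u64 val, int interesting)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(val));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = val;

	if (interesting)
		ring_buffer_unlock_commit(buffer, event);
	else
		/* do not call ring_buffer_unlock_commit() after this */
		ring_buffer_discard_commit(buffer, event);
}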
1812
1813/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001814 * ring_buffer_write - write data to the buffer without reserving
1815 * @buffer: The ring buffer to write to.
1816 * @length: The length of the data being written (excluding the event header)
1817 * @data: The data to write to the buffer.
1818 *
1819 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1820 * one function. If you already have the data to write to the buffer, it
1821 * may be easier to simply call this function.
1822 *
1823 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1824 * and not the length of the event which would hold the header.
1825 */
1826int ring_buffer_write(struct ring_buffer *buffer,
1827 unsigned long length,
1828 void *data)
1829{
1830 struct ring_buffer_per_cpu *cpu_buffer;
1831 struct ring_buffer_event *event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001832 void *body;
1833 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001834 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001835
Steven Rostedt033601a2008-11-21 12:41:55 -05001836 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001837 return -EBUSY;
1838
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001839 if (atomic_read(&buffer->record_disabled))
1840 return -EBUSY;
1841
Steven Rostedt182e9f52008-11-03 23:15:56 -05001842 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001843
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001844 cpu = raw_smp_processor_id();
1845
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301846 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001847 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001848
1849 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001850
1851 if (atomic_read(&cpu_buffer->record_disabled))
1852 goto out;
1853
Steven Rostedtbe957c42009-05-11 14:42:53 -04001854 if (length > BUF_MAX_DATA_SIZE)
1855 goto out;
1856
1857 event = rb_reserve_next_event(cpu_buffer, length);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001858 if (!event)
1859 goto out;
1860
1861 body = rb_event_data(event);
1862
1863 memcpy(body, data, length);
1864
1865 rb_commit(cpu_buffer, event);
1866
1867 ret = 0;
1868 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001869 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001870
1871 return ret;
1872}
Robert Richterc4f50182008-12-11 16:49:22 +01001873EXPORT_SYMBOL_GPL(ring_buffer_write);
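
/*
 * Example: the one-shot ring_buffer_write() path for data that is already
 * laid out in memory.  A sketch only; the sample struct and helper name
 * are hypothetical.
 */
struct example_sample {
	u64	ts;
	u32	value;
};

static inline int example_write_sample(struct ring_buffer *buffer,
				       u64 ts, u32 value)
{
	struct example_sample sample = {
		.ts	= ts,
		.value	= value,
	};

	/* length is the payload size only, not the event header */
	return ring_buffer_write(buffer, sizeof(sample), &sample);
}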
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001874
Andrew Morton34a148b2009-01-09 12:27:09 -08001875static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001876{
1877 struct buffer_page *reader = cpu_buffer->reader_page;
1878 struct buffer_page *head = cpu_buffer->head_page;
1879 struct buffer_page *commit = cpu_buffer->commit_page;
1880
1881 return reader->read == rb_page_commit(reader) &&
1882 (commit == reader ||
1883 (commit == head &&
1884 head->read == rb_page_commit(commit)));
1885}
1886
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001887/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001888 * ring_buffer_record_disable - stop all writes into the buffer
1889 * @buffer: The ring buffer to stop writes to.
1890 *
1891 * This prevents all writes to the buffer. Any attempt to write
1892 * to the buffer after this will fail and return NULL.
1893 *
1894 * The caller should call synchronize_sched() after this.
1895 */
1896void ring_buffer_record_disable(struct ring_buffer *buffer)
1897{
1898 atomic_inc(&buffer->record_disabled);
1899}
Robert Richterc4f50182008-12-11 16:49:22 +01001900EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001901
1902/**
1903 * ring_buffer_record_enable - enable writes to the buffer
1904 * @buffer: The ring buffer to enable writes
1905 *
1906 * Note, multiple disables will need the same number of enables
1907 * to truly enable the writing (much like preempt_disable).
1908 */
1909void ring_buffer_record_enable(struct ring_buffer *buffer)
1910{
1911 atomic_dec(&buffer->record_disabled);
1912}
Robert Richterc4f50182008-12-11 16:49:22 +01001913EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
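
/*
 * Example: quiescing the buffer around work that must not race with
 * writers, using the disable/synchronize_sched/enable pattern noted in
 * the comments above.  A sketch; the callback is a hypothetical stand-in
 * for whatever the caller needs to do while writes are stopped.
 */
static inline void example_with_writers_stopped(struct ring_buffer *buffer,
						void (*work)(struct ring_buffer *))
{
	ring_buffer_record_disable(buffer);
	/* wait for writers already inside the buffer to finish */
	synchronize_sched();

	work(buffer);

	ring_buffer_record_enable(buffer);
}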
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001914
1915/**
1916 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1917 * @buffer: The ring buffer to stop writes to.
1918 * @cpu: The CPU buffer to stop
1919 *
1920 * This prevents all writes to the buffer. Any attempt to write
1921 * to the buffer after this will fail and return NULL.
1922 *
1923 * The caller should call synchronize_sched() after this.
1924 */
1925void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1926{
1927 struct ring_buffer_per_cpu *cpu_buffer;
1928
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301929 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001930 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001931
1932 cpu_buffer = buffer->buffers[cpu];
1933 atomic_inc(&cpu_buffer->record_disabled);
1934}
Robert Richterc4f50182008-12-11 16:49:22 +01001935EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001936
1937/**
1938 * ring_buffer_record_enable_cpu - enable writes to the buffer
1939 * @buffer: The ring buffer to enable writes
1940 * @cpu: The CPU to enable.
1941 *
1942 * Note, multiple disables will need the same number of enables
1943 * to truly enable the writing (much like preempt_disable).
1944 */
1945void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1946{
1947 struct ring_buffer_per_cpu *cpu_buffer;
1948
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301949 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001950 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001951
1952 cpu_buffer = buffer->buffers[cpu];
1953 atomic_dec(&cpu_buffer->record_disabled);
1954}
Robert Richterc4f50182008-12-11 16:49:22 +01001955EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001956
1957/**
1958 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1959 * @buffer: The ring buffer
1960 * @cpu: The per CPU buffer to get the entries from.
1961 */
1962unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1963{
1964 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001965 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001966
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301967 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001968 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001969
1970 cpu_buffer = buffer->buffers[cpu];
Steven Rostedte4906ef2009-04-30 20:49:44 -04001971 ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1972 - cpu_buffer->read;
Steven Rostedt554f7862009-03-11 22:00:13 -04001973
1974 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001975}
Robert Richterc4f50182008-12-11 16:49:22 +01001976EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001977
1978/**
1979 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1980 * @buffer: The ring buffer
1981 * @cpu: The per CPU buffer to get the number of overruns from
1982 */
1983unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1984{
1985 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001986 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001987
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301988 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001989 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001990
1991 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04001992 ret = cpu_buffer->overrun;
Steven Rostedt554f7862009-03-11 22:00:13 -04001993
1994 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001995}
Robert Richterc4f50182008-12-11 16:49:22 +01001996EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001997
1998/**
Steven Rostedtf0d2c682009-04-29 13:43:37 -04001999 * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
2000 * @buffer: The ring buffer
2001 * @cpu: The per CPU buffer to get the number of dropped NMIs from
2002 */
2003unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
2004{
2005 struct ring_buffer_per_cpu *cpu_buffer;
2006 unsigned long ret;
2007
2008 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2009 return 0;
2010
2011 cpu_buffer = buffer->buffers[cpu];
2012 ret = cpu_buffer->nmi_dropped;
2013
2014 return ret;
2015}
2016EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
2017
2018/**
2019 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2020 * @buffer: The ring buffer
2021 * @cpu: The per CPU buffer to get the number of overruns from
2022 */
2023unsigned long
2024ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2025{
2026 struct ring_buffer_per_cpu *cpu_buffer;
2027 unsigned long ret;
2028
2029 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2030 return 0;
2031
2032 cpu_buffer = buffer->buffers[cpu];
2033 ret = cpu_buffer->commit_overrun;
2034
2035 return ret;
2036}
2037EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2038
2039/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002040 * ring_buffer_entries - get the number of entries in a buffer
2041 * @buffer: The ring buffer
2042 *
2043 * Returns the total number of entries in the ring buffer
2044 * (all CPU entries)
2045 */
2046unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2047{
2048 struct ring_buffer_per_cpu *cpu_buffer;
2049 unsigned long entries = 0;
2050 int cpu;
2051
2052 /* if you care about this being correct, lock the buffer */
2053 for_each_buffer_cpu(buffer, cpu) {
2054 cpu_buffer = buffer->buffers[cpu];
Steven Rostedte4906ef2009-04-30 20:49:44 -04002055 entries += (local_read(&cpu_buffer->entries) -
2056 cpu_buffer->overrun) - cpu_buffer->read;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002057 }
2058
2059 return entries;
2060}
Robert Richterc4f50182008-12-11 16:49:22 +01002061EXPORT_SYMBOL_GPL(ring_buffer_entries);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002062
2063/**
2064 * ring_buffer_overruns - get the number of overruns in the buffer
2065 * @buffer: The ring buffer
2066 *
2067 * Returns the total number of overruns in the ring buffer
2068 * (all CPU entries)
2069 */
2070unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2071{
2072 struct ring_buffer_per_cpu *cpu_buffer;
2073 unsigned long overruns = 0;
2074 int cpu;
2075
2076 /* if you care about this being correct, lock the buffer */
2077 for_each_buffer_cpu(buffer, cpu) {
2078 cpu_buffer = buffer->buffers[cpu];
2079 overruns += cpu_buffer->overrun;
2080 }
2081
2082 return overruns;
2083}
Robert Richterc4f50182008-12-11 16:49:22 +01002084EXPORT_SYMBOL_GPL(ring_buffer_overruns);
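
/*
 * Example: reporting how many events are waiting in each per-cpu buffer
 * and how many were overwritten.  A sketch; the helper name and printk
 * format are hypothetical.
 */
static inline void example_report_usage(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		printk(KERN_INFO "cpu%d: %lu entries, %lu overruns\n",
		       cpu,
		       ring_buffer_entries_cpu(buffer, cpu),
		       ring_buffer_overrun_cpu(buffer, cpu));
}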
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002085
Steven Rostedt642edba2008-11-12 00:01:26 -05002086static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002087{
2088 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2089
Steven Rostedtd7690412008-10-01 00:29:53 -04002090 /* Iterator usage is expected to have record disabled */
2091 if (list_empty(&cpu_buffer->reader_page->list)) {
2092 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002093 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002094 } else {
2095 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002096 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04002097 }
2098 if (iter->head)
2099 iter->read_stamp = cpu_buffer->read_stamp;
2100 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05002101 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05002102}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002103
Steven Rostedt642edba2008-11-12 00:01:26 -05002104/**
2105 * ring_buffer_iter_reset - reset an iterator
2106 * @iter: The iterator to reset
2107 *
2108 * Resets the iterator, so that it will start from the beginning
2109 * again.
2110 */
2111void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2112{
Steven Rostedt554f7862009-03-11 22:00:13 -04002113 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt642edba2008-11-12 00:01:26 -05002114 unsigned long flags;
2115
Steven Rostedt554f7862009-03-11 22:00:13 -04002116 if (!iter)
2117 return;
2118
2119 cpu_buffer = iter->cpu_buffer;
2120
Steven Rostedt642edba2008-11-12 00:01:26 -05002121 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2122 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002123 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002124}
Robert Richterc4f50182008-12-11 16:49:22 +01002125EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002126
2127/**
2128 * ring_buffer_iter_empty - check if an iterator has no more to read
2129 * @iter: The iterator to check
2130 */
2131int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2132{
2133 struct ring_buffer_per_cpu *cpu_buffer;
2134
2135 cpu_buffer = iter->cpu_buffer;
2136
Steven Rostedtbf41a152008-10-04 02:00:59 -04002137 return iter->head_page == cpu_buffer->commit_page &&
2138 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002139}
Robert Richterc4f50182008-12-11 16:49:22 +01002140EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002141
2142static void
2143rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2144 struct ring_buffer_event *event)
2145{
2146 u64 delta;
2147
Lai Jiangshan334d4162009-04-24 11:27:05 +08002148 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002149 case RINGBUF_TYPE_PADDING:
2150 return;
2151
2152 case RINGBUF_TYPE_TIME_EXTEND:
2153 delta = event->array[0];
2154 delta <<= TS_SHIFT;
2155 delta += event->time_delta;
2156 cpu_buffer->read_stamp += delta;
2157 return;
2158
2159 case RINGBUF_TYPE_TIME_STAMP:
2160 /* FIXME: not implemented */
2161 return;
2162
2163 case RINGBUF_TYPE_DATA:
2164 cpu_buffer->read_stamp += event->time_delta;
2165 return;
2166
2167 default:
2168 BUG();
2169 }
2170 return;
2171}
2172
2173static void
2174rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2175 struct ring_buffer_event *event)
2176{
2177 u64 delta;
2178
Lai Jiangshan334d4162009-04-24 11:27:05 +08002179 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002180 case RINGBUF_TYPE_PADDING:
2181 return;
2182
2183 case RINGBUF_TYPE_TIME_EXTEND:
2184 delta = event->array[0];
2185 delta <<= TS_SHIFT;
2186 delta += event->time_delta;
2187 iter->read_stamp += delta;
2188 return;
2189
2190 case RINGBUF_TYPE_TIME_STAMP:
2191 /* FIXME: not implemented */
2192 return;
2193
2194 case RINGBUF_TYPE_DATA:
2195 iter->read_stamp += event->time_delta;
2196 return;
2197
2198 default:
2199 BUG();
2200 }
2201 return;
2202}
2203
Steven Rostedtd7690412008-10-01 00:29:53 -04002204static struct buffer_page *
2205rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002206{
Steven Rostedtd7690412008-10-01 00:29:53 -04002207 struct buffer_page *reader = NULL;
2208 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002209 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002210
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002211 local_irq_save(flags);
2212 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04002213
2214 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002215 /*
2216 * This should normally only loop twice. But because the
2217 * start of the reader inserts an empty page, it causes
2218 * a case where we will loop three times. There should be no
2219 * reason to loop four times (that I know of).
2220 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002221 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002222 reader = NULL;
2223 goto out;
2224 }
2225
Steven Rostedtd7690412008-10-01 00:29:53 -04002226 reader = cpu_buffer->reader_page;
2227
2228 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002229 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04002230 goto out;
2231
2232 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002233 if (RB_WARN_ON(cpu_buffer,
2234 cpu_buffer->reader_page->read > rb_page_size(reader)))
2235 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04002236
2237 /* check if we caught up to the tail */
2238 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002239 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04002240 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002241
2242 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04002243 * Splice the empty reader page into the list around the head.
2244 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002245 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002246
Steven Rostedtd7690412008-10-01 00:29:53 -04002247 reader = cpu_buffer->head_page;
2248 cpu_buffer->reader_page->list.next = reader->list.next;
2249 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002250
2251 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002252 local_set(&cpu_buffer->reader_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002253 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04002254
2255 /* Make the reader page now replace the head */
2256 reader->list.prev->next = &cpu_buffer->reader_page->list;
2257 reader->list.next->prev = &cpu_buffer->reader_page->list;
2258
2259 /*
2260 * If the tail is on the reader, then we must set the head
2261 * to the inserted page, otherwise we set it one before.
2262 */
2263 cpu_buffer->head_page = cpu_buffer->reader_page;
2264
Steven Rostedtbf41a152008-10-04 02:00:59 -04002265 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04002266 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2267
2268 /* Finally update the reader page to the new head */
2269 cpu_buffer->reader_page = reader;
2270 rb_reset_reader_page(cpu_buffer);
2271
2272 goto again;
2273
2274 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002275 __raw_spin_unlock(&cpu_buffer->lock);
2276 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04002277
2278 return reader;
2279}
2280
2281static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2282{
2283 struct ring_buffer_event *event;
2284 struct buffer_page *reader;
2285 unsigned length;
2286
2287 reader = rb_get_reader_page(cpu_buffer);
2288
2289 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002290 if (RB_WARN_ON(cpu_buffer, !reader))
2291 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002292
2293 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002294
Lai Jiangshan334d4162009-04-24 11:27:05 +08002295 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2296 || rb_discarded_event(event))
Steven Rostedte4906ef2009-04-30 20:49:44 -04002297 cpu_buffer->read++;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002298
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002299 rb_update_read_stamp(cpu_buffer, event);
2300
Steven Rostedtd7690412008-10-01 00:29:53 -04002301 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002302 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002303}
2304
2305static void rb_advance_iter(struct ring_buffer_iter *iter)
2306{
2307 struct ring_buffer *buffer;
2308 struct ring_buffer_per_cpu *cpu_buffer;
2309 struct ring_buffer_event *event;
2310 unsigned length;
2311
2312 cpu_buffer = iter->cpu_buffer;
2313 buffer = cpu_buffer->buffer;
2314
2315 /*
2316 * Check if we are at the end of the buffer.
2317 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002318 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedtea05b572009-06-03 09:30:10 -04002319 /* discarded commits can make the page empty */
2320 if (iter->head_page == cpu_buffer->commit_page)
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002321 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002322 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002323 return;
2324 }
2325
2326 event = rb_iter_head_event(iter);
2327
2328 length = rb_event_length(event);
2329
2330 /*
2331 * This should not be called to advance the header if we are
2332 * at the tail of the buffer.
2333 */
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002334 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05002335 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7b2008-11-11 15:28:41 -05002336 (iter->head + length > rb_commit_index(cpu_buffer))))
2337 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002338
2339 rb_update_iter_read_stamp(iter, event);
2340
2341 iter->head += length;
2342
2343 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002344 if ((iter->head >= rb_page_size(iter->head_page)) &&
2345 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002346 rb_advance_iter(iter);
2347}
2348
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002349static struct ring_buffer_event *
2350rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002351{
2352 struct ring_buffer_per_cpu *cpu_buffer;
2353 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04002354 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002355 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002356
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002357 cpu_buffer = buffer->buffers[cpu];
2358
2359 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002360 /*
2361 * We repeat when a timestamp is encountered. It is possible
2362 * to get multiple timestamps from an interrupt entering just
Steven Rostedtea05b572009-06-03 09:30:10 -04002363 * as one timestamp is about to be written, or from discarded
2364	 * commits. The most that we can have is the number that fits on a single page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002365 */
Steven Rostedtea05b572009-06-03 09:30:10 -04002366 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002367 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002368
Steven Rostedtd7690412008-10-01 00:29:53 -04002369 reader = rb_get_reader_page(cpu_buffer);
2370 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002371 return NULL;
2372
Steven Rostedtd7690412008-10-01 00:29:53 -04002373 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002374
Lai Jiangshan334d4162009-04-24 11:27:05 +08002375 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002376 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002377 if (rb_null_event(event))
2378 RB_WARN_ON(cpu_buffer, 1);
2379 /*
2380 * Because the writer could be discarding every
2381 * event it creates (which would probably be bad)
2382 * if we were to go back to "again" then we may never
2383 * catch up, and will trigger the warn on, or lock
2384 * the box. Return the padding, and we will release
2385 * the current locks, and try again.
2386 */
Tom Zanussi2d622712009-03-22 03:30:49 -05002387 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002388
2389 case RINGBUF_TYPE_TIME_EXTEND:
2390 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04002391 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002392 goto again;
2393
2394 case RINGBUF_TYPE_TIME_STAMP:
2395 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04002396 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002397 goto again;
2398
2399 case RINGBUF_TYPE_DATA:
2400 if (ts) {
2401 *ts = cpu_buffer->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002402 ring_buffer_normalize_time_stamp(buffer,
2403 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002404 }
2405 return event;
2406
2407 default:
2408 BUG();
2409 }
2410
2411 return NULL;
2412}
Robert Richterc4f50182008-12-11 16:49:22 +01002413EXPORT_SYMBOL_GPL(ring_buffer_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002414
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002415static struct ring_buffer_event *
2416rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002417{
2418 struct ring_buffer *buffer;
2419 struct ring_buffer_per_cpu *cpu_buffer;
2420 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002421 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002422
2423 if (ring_buffer_iter_empty(iter))
2424 return NULL;
2425
2426 cpu_buffer = iter->cpu_buffer;
2427 buffer = cpu_buffer->buffer;
2428
2429 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002430 /*
Steven Rostedtea05b572009-06-03 09:30:10 -04002431 * We repeat when a timestamp is encountered.
2432 * We can get multiple timestamps by nested interrupts or also
2433 * if filtering is on (discarding commits). Since discarding
2434 * commits can be frequent we can get a lot of timestamps.
2435 * But we limit them by not adding timestamps if they begin
2436 * at the start of a page.
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002437 */
Steven Rostedtea05b572009-06-03 09:30:10 -04002438 if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002439 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002440
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002441 if (rb_per_cpu_empty(cpu_buffer))
2442 return NULL;
2443
2444 event = rb_iter_head_event(iter);
2445
Lai Jiangshan334d4162009-04-24 11:27:05 +08002446 switch (event->type_len) {
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002447 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002448 if (rb_null_event(event)) {
2449 rb_inc_iter(iter);
2450 goto again;
2451 }
2452 rb_advance_iter(iter);
2453 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002454
2455 case RINGBUF_TYPE_TIME_EXTEND:
2456 /* Internal data, OK to advance */
2457 rb_advance_iter(iter);
2458 goto again;
2459
2460 case RINGBUF_TYPE_TIME_STAMP:
2461 /* FIXME: not implemented */
2462 rb_advance_iter(iter);
2463 goto again;
2464
2465 case RINGBUF_TYPE_DATA:
2466 if (ts) {
2467 *ts = iter->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002468 ring_buffer_normalize_time_stamp(buffer,
2469 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002470 }
2471 return event;
2472
2473 default:
2474 BUG();
2475 }
2476
2477 return NULL;
2478}
Robert Richterc4f50182008-12-11 16:49:22 +01002479EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002480
Steven Rostedt8d707e82009-06-16 21:22:48 -04002481static inline int rb_ok_to_lock(void)
2482{
2483 /*
2484	 * If an NMI die dump is reading out the content of the ring buffer,
2485	 * do not grab locks. We also permanently disable the ring
2486	 * buffer. A one-time deal is all you get from reading
2487	 * the ring buffer from an NMI.
2488 */
Steven Rostedt464e85e2009-08-05 15:26:37 -04002489 if (likely(!in_nmi()))
Steven Rostedt8d707e82009-06-16 21:22:48 -04002490 return 1;
2491
2492 tracing_off_permanent();
2493 return 0;
2494}
2495
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002496/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002497 * ring_buffer_peek - peek at the next event to be read
2498 * @buffer: The ring buffer to read
2499 * @cpu: The cpu to peek at
2500 * @ts: The timestamp counter of this event.
2501 *
2502 * This will return the event that will be read next, but does
2503 * not consume the data.
2504 */
2505struct ring_buffer_event *
2506ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2507{
2508 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8aabee52009-03-12 13:13:49 -04002509 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002510 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002511 int dolock;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002512
Steven Rostedt554f7862009-03-11 22:00:13 -04002513 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002514 return NULL;
Steven Rostedt554f7862009-03-11 22:00:13 -04002515
Steven Rostedt8d707e82009-06-16 21:22:48 -04002516 dolock = rb_ok_to_lock();
Tom Zanussi2d622712009-03-22 03:30:49 -05002517 again:
Steven Rostedt8d707e82009-06-16 21:22:48 -04002518 local_irq_save(flags);
2519 if (dolock)
2520 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002521 event = rb_buffer_peek(buffer, cpu, ts);
Robert Richter469535a2009-07-30 19:19:18 +02002522 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2523 rb_advance_reader(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04002524 if (dolock)
2525 spin_unlock(&cpu_buffer->reader_lock);
2526 local_irq_restore(flags);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002527
Lai Jiangshan334d4162009-04-24 11:27:05 +08002528 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002529 cpu_relax();
2530 goto again;
2531 }
2532
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002533 return event;
2534}
2535
2536/**
2537 * ring_buffer_iter_peek - peek at the next event to be read
2538 * @iter: The ring buffer iterator
2539 * @ts: The timestamp counter of this event.
2540 *
2541 * This will return the event that will be read next, but does
2542 * not increment the iterator.
2543 */
2544struct ring_buffer_event *
2545ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2546{
2547 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2548 struct ring_buffer_event *event;
2549 unsigned long flags;
2550
Tom Zanussi2d622712009-03-22 03:30:49 -05002551 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002552 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2553 event = rb_iter_peek(iter, ts);
2554 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2555
Lai Jiangshan334d4162009-04-24 11:27:05 +08002556 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002557 cpu_relax();
2558 goto again;
2559 }
2560
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002561 return event;
2562}
2563
2564/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002565 * ring_buffer_consume - return an event and consume it
2566 * @buffer: The ring buffer to get the next event from
2567 *
2568 * Returns the next event in the ring buffer, and that event is consumed.
2569 * Meaning that sequential reads will keep returning a different event,
2570 * and eventually empty the ring buffer if the producer is slower.
2571 */
2572struct ring_buffer_event *
2573ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2574{
Steven Rostedt554f7862009-03-11 22:00:13 -04002575 struct ring_buffer_per_cpu *cpu_buffer;
2576 struct ring_buffer_event *event = NULL;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002577 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002578 int dolock;
2579
2580 dolock = rb_ok_to_lock();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002581
Tom Zanussi2d622712009-03-22 03:30:49 -05002582 again:
Steven Rostedt554f7862009-03-11 22:00:13 -04002583 /* might be called in atomic */
2584 preempt_disable();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002585
Steven Rostedt554f7862009-03-11 22:00:13 -04002586 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2587 goto out;
2588
2589 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04002590 local_irq_save(flags);
2591 if (dolock)
2592 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002593
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002594 event = rb_buffer_peek(buffer, cpu, ts);
Robert Richter469535a2009-07-30 19:19:18 +02002595 if (event)
2596 rb_advance_reader(cpu_buffer);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002597
Steven Rostedt8d707e82009-06-16 21:22:48 -04002598 if (dolock)
2599 spin_unlock(&cpu_buffer->reader_lock);
2600 local_irq_restore(flags);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002601
Steven Rostedt554f7862009-03-11 22:00:13 -04002602 out:
2603 preempt_enable();
2604
Lai Jiangshan334d4162009-04-24 11:27:05 +08002605 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002606 cpu_relax();
2607 goto again;
2608 }
2609
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002610 return event;
2611}
Robert Richterc4f50182008-12-11 16:49:22 +01002612EXPORT_SYMBOL_GPL(ring_buffer_consume);
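
/*
 * Example: draining one cpu's buffer with the consuming read interface.
 * A sketch; the callback-based helper is hypothetical, while
 * ring_buffer_consume() and ring_buffer_event_data() are the calls being
 * demonstrated.
 */
static inline int example_drain_cpu(struct ring_buffer *buffer, int cpu,
				    void (*handle)(void *data, u64 ts))
{
	struct ring_buffer_event *event;
	u64 ts;
	int count = 0;

	/* each call returns the next event and removes it from the buffer */
	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		handle(ring_buffer_event_data(event), ts);
		count++;
	}

	return count;
}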
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002613
2614/**
2615 * ring_buffer_read_start - start a non consuming read of the buffer
2616 * @buffer: The ring buffer to read from
2617 * @cpu: The cpu buffer to iterate over
2618 *
2619 * This starts up an iteration through the buffer. It also disables
2620 * the recording to the buffer until the reading is finished.
2621 * This prevents the reading from being corrupted. This is not
2622 * a consuming read, so a producer is not expected.
2623 *
2624 * Must be paired with ring_buffer_read_finish.
2625 */
2626struct ring_buffer_iter *
2627ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2628{
2629 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002630 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04002631 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002632
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302633 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002634 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002635
2636 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2637 if (!iter)
Steven Rostedt8aabee52009-03-12 13:13:49 -04002638 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002639
2640 cpu_buffer = buffer->buffers[cpu];
2641
2642 iter->cpu_buffer = cpu_buffer;
2643
2644 atomic_inc(&cpu_buffer->record_disabled);
2645 synchronize_sched();
2646
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002647 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002648 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05002649 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002650 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002651 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002652
2653 return iter;
2654}
Robert Richterc4f50182008-12-11 16:49:22 +01002655EXPORT_SYMBOL_GPL(ring_buffer_read_start);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002656
2657/**
2658 * ring_buffer_read_finish - finish reading the iterator of the buffer
2659 * @iter: The iterator retrieved by ring_buffer_read_start
2660 *
2661 * This re-enables the recording to the buffer, and frees the
2662 * iterator.
2663 */
2664void
2665ring_buffer_read_finish(struct ring_buffer_iter *iter)
2666{
2667 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2668
2669 atomic_dec(&cpu_buffer->record_disabled);
2670 kfree(iter);
2671}
Robert Richterc4f50182008-12-11 16:49:22 +01002672EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002673
2674/**
2675 * ring_buffer_read - read the next item in the ring buffer by the iterator
2676 * @iter: The ring buffer iterator
2677 * @ts: The time stamp of the event read.
2678 *
2679 * This reads the next event in the ring buffer and increments the iterator.
2680 */
2681struct ring_buffer_event *
2682ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2683{
2684 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002685 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2686 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002687
Tom Zanussi2d622712009-03-22 03:30:49 -05002688 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002689 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2690 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002691 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002692 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002693
2694 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002695 out:
2696 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002697
Lai Jiangshan334d4162009-04-24 11:27:05 +08002698 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
Tom Zanussi2d622712009-03-22 03:30:49 -05002699 cpu_relax();
2700 goto again;
2701 }
2702
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002703 return event;
2704}
Robert Richterc4f50182008-12-11 16:49:22 +01002705EXPORT_SYMBOL_GPL(ring_buffer_read);
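/*
 * A minimal usage sketch (not part of the kernel API itself): a
 * non-consuming dump of one CPU buffer using the iterator calls above.
 * "dump_event()" is a hypothetical callback supplied by the caller.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		dump_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */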
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002706
2707/**
2708 * ring_buffer_size - return the size of the ring buffer (in bytes)
2709 * @buffer: The ring buffer.
2710 */
2711unsigned long ring_buffer_size(struct ring_buffer *buffer)
2712{
2713 return BUF_PAGE_SIZE * buffer->pages;
2714}
Robert Richterc4f50182008-12-11 16:49:22 +01002715EXPORT_SYMBOL_GPL(ring_buffer_size);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002716
2717static void
2718rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2719{
2720 cpu_buffer->head_page
2721 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002722 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002723 local_set(&cpu_buffer->head_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002724 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002725
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002726 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002727
2728 cpu_buffer->tail_page = cpu_buffer->head_page;
2729 cpu_buffer->commit_page = cpu_buffer->head_page;
2730
2731 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2732 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04002733 local_set(&cpu_buffer->reader_page->entries, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002734 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002735 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002736
Steven Rostedtf0d2c682009-04-29 13:43:37 -04002737 cpu_buffer->nmi_dropped = 0;
2738 cpu_buffer->commit_overrun = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002739 cpu_buffer->overrun = 0;
Steven Rostedte4906ef2009-04-30 20:49:44 -04002740 cpu_buffer->read = 0;
2741 local_set(&cpu_buffer->entries, 0);
Steven Rostedtfa743952009-06-16 12:37:57 -04002742 local_set(&cpu_buffer->committing, 0);
2743 local_set(&cpu_buffer->commits, 0);
Steven Rostedt69507c02009-01-21 18:45:57 -05002744
2745 cpu_buffer->write_stamp = 0;
2746 cpu_buffer->read_stamp = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002747}
2748
2749/**
2750 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2751 * @buffer: The ring buffer to reset a per cpu buffer of
2752 * @cpu: The CPU buffer to be reset
2753 */
2754void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2755{
2756 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2757 unsigned long flags;
2758
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302759 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002760 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002761
Steven Rostedt41ede232009-05-01 20:26:54 -04002762 atomic_inc(&cpu_buffer->record_disabled);
2763
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002764 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2765
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002766 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002767
2768 rb_reset_cpu(cpu_buffer);
2769
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002770 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002771
2772 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt41ede232009-05-01 20:26:54 -04002773
2774 atomic_dec(&cpu_buffer->record_disabled);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002775}
Robert Richterc4f50182008-12-11 16:49:22 +01002776EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002777
2778/**
2779 * ring_buffer_reset - reset a ring buffer
2780 * @buffer: The ring buffer to reset all cpu buffers
2781 */
2782void ring_buffer_reset(struct ring_buffer *buffer)
2783{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002784 int cpu;
2785
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002786 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04002787 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002788}
Robert Richterc4f50182008-12-11 16:49:22 +01002789EXPORT_SYMBOL_GPL(ring_buffer_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002790
2791/**
 2792 * ring_buffer_empty - is the ring buffer empty?
2793 * @buffer: The ring buffer to test
2794 */
2795int ring_buffer_empty(struct ring_buffer *buffer)
2796{
2797 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtd4788202009-06-17 00:39:43 -04002798 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002799 int dolock;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002800 int cpu;
Steven Rostedtd4788202009-06-17 00:39:43 -04002801 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002802
Steven Rostedt8d707e82009-06-16 21:22:48 -04002803 dolock = rb_ok_to_lock();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002804
2805 /* yes this is racy, but if you don't like the race, lock the buffer */
2806 for_each_buffer_cpu(buffer, cpu) {
2807 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04002808 local_irq_save(flags);
2809 if (dolock)
2810 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedtd4788202009-06-17 00:39:43 -04002811 ret = rb_per_cpu_empty(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04002812 if (dolock)
2813 spin_unlock(&cpu_buffer->reader_lock);
2814 local_irq_restore(flags);
2815
Steven Rostedtd4788202009-06-17 00:39:43 -04002816 if (!ret)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002817 return 0;
2818 }
Steven Rostedt554f7862009-03-11 22:00:13 -04002819
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002820 return 1;
2821}
Robert Richterc4f50182008-12-11 16:49:22 +01002822EXPORT_SYMBOL_GPL(ring_buffer_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002823
2824/**
2825 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2826 * @buffer: The ring buffer
2827 * @cpu: The CPU buffer to test
2828 */
2829int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2830{
2831 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedtd4788202009-06-17 00:39:43 -04002832 unsigned long flags;
Steven Rostedt8d707e82009-06-16 21:22:48 -04002833 int dolock;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002834 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002835
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302836 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002837 return 1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002838
Steven Rostedt8d707e82009-06-16 21:22:48 -04002839 dolock = rb_ok_to_lock();
Steven Rostedt554f7862009-03-11 22:00:13 -04002840
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002841 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8d707e82009-06-16 21:22:48 -04002842 local_irq_save(flags);
2843 if (dolock)
2844 spin_lock(&cpu_buffer->reader_lock);
Steven Rostedt554f7862009-03-11 22:00:13 -04002845 ret = rb_per_cpu_empty(cpu_buffer);
Steven Rostedt8d707e82009-06-16 21:22:48 -04002846 if (dolock)
2847 spin_unlock(&cpu_buffer->reader_lock);
2848 local_irq_restore(flags);
Steven Rostedt554f7862009-03-11 22:00:13 -04002849
2850 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002851}
Robert Richterc4f50182008-12-11 16:49:22 +01002852EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
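/*
 * A minimal sketch of checking each CPU for data before doing a
 * consuming read; "handle_event()" is a hypothetical caller-supplied
 * helper.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		if (ring_buffer_empty_cpu(buffer, cpu))
 *			continue;
 *		while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *			handle_event(ring_buffer_event_data(event), ts);
 *	}
 */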
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002853
2854/**
2855 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2856 * @buffer_a: One buffer to swap with
 2857 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
2858 *
2859 * This function is useful for tracers that want to take a "snapshot"
 2860 * of a CPU buffer and have another backup buffer lying around.
 2861 * It is expected that the tracer handles the cpu buffer not being
2862 * used at the moment.
2863 */
2864int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2865 struct ring_buffer *buffer_b, int cpu)
2866{
2867 struct ring_buffer_per_cpu *cpu_buffer_a;
2868 struct ring_buffer_per_cpu *cpu_buffer_b;
Steven Rostedt554f7862009-03-11 22:00:13 -04002869 int ret = -EINVAL;
2870
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302871 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2872 !cpumask_test_cpu(cpu, buffer_b->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04002873 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002874
2875 /* At least make sure the two buffers are somewhat the same */
Lai Jiangshan6d102bc2008-12-17 17:48:23 +08002876 if (buffer_a->pages != buffer_b->pages)
Steven Rostedt554f7862009-03-11 22:00:13 -04002877 goto out;
2878
2879 ret = -EAGAIN;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002880
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002881 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedt554f7862009-03-11 22:00:13 -04002882 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002883
2884 if (atomic_read(&buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002885 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002886
2887 if (atomic_read(&buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002888 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002889
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002890 cpu_buffer_a = buffer_a->buffers[cpu];
2891 cpu_buffer_b = buffer_b->buffers[cpu];
2892
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002893 if (atomic_read(&cpu_buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002894 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002895
2896 if (atomic_read(&cpu_buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002897 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002898
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002899 /*
2900 * We can't do a synchronize_sched here because this
2901 * function can be called in atomic context.
2902 * Normally this will be called from the same CPU as cpu.
 2903 * If not, it's up to the caller to protect this.
2904 */
2905 atomic_inc(&cpu_buffer_a->record_disabled);
2906 atomic_inc(&cpu_buffer_b->record_disabled);
2907
2908 buffer_a->buffers[cpu] = cpu_buffer_b;
2909 buffer_b->buffers[cpu] = cpu_buffer_a;
2910
2911 cpu_buffer_b->buffer = buffer_a;
2912 cpu_buffer_a->buffer = buffer_b;
2913
2914 atomic_dec(&cpu_buffer_a->record_disabled);
2915 atomic_dec(&cpu_buffer_b->record_disabled);
2916
Steven Rostedt554f7862009-03-11 22:00:13 -04002917 ret = 0;
2918out:
Steven Rostedt554f7862009-03-11 22:00:13 -04002919 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002920}
Robert Richterc4f50182008-12-11 16:49:22 +01002921EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
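/*
 * A rough sketch of the snapshot pattern this is intended for.  It
 * assumes the caller already allocated "snapshot_buffer" with the same
 * number of pages as the live "trace_buffer" (both names hypothetical):
 *
 *	if (ring_buffer_swap_cpu(trace_buffer, snapshot_buffer, cpu))
 *		return;
 *
 * On success the live buffer keeps recording into what used to be the
 * snapshot's per-cpu pages, while the previous contents can be read
 * from snapshot_buffer with the consuming or iterator API.
 */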
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002922
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002923/**
2924 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2925 * @buffer: the buffer to allocate for.
2926 *
2927 * This function is used in conjunction with ring_buffer_read_page.
2928 * When reading a full page from the ring buffer, these functions
2929 * can be used to speed up the process. The calling function should
2930 * allocate a few pages first with this function. Then when it
2931 * needs to get pages from the ring buffer, it passes the result
2932 * of this function into ring_buffer_read_page, which will swap
2933 * the page that was allocated, with the read page of the buffer.
2934 *
2935 * Returns:
2936 * The page allocated, or NULL on error.
2937 */
2938void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2939{
Steven Rostedt044fa782008-12-02 23:50:03 -05002940 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002941 unsigned long addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002942
2943 addr = __get_free_page(GFP_KERNEL);
2944 if (!addr)
2945 return NULL;
2946
Steven Rostedt044fa782008-12-02 23:50:03 -05002947 bpage = (void *)addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002948
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002949 rb_init_page(bpage);
2950
Steven Rostedt044fa782008-12-02 23:50:03 -05002951 return bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002952}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04002953EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002954
2955/**
2956 * ring_buffer_free_read_page - free an allocated read page
 2957 * @buffer: the buffer the page was allocated for
2958 * @data: the page to free
2959 *
2960 * Free a page allocated from ring_buffer_alloc_read_page.
2961 */
2962void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2963{
2964 free_page((unsigned long)data);
2965}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04002966EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002967
2968/**
2969 * ring_buffer_read_page - extract a page from the ring buffer
2970 * @buffer: buffer to extract from
2971 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002972 * @len: amount to extract
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002973 * @cpu: the cpu of the buffer to extract
2974 * @full: should the extraction only happen when the page is full.
2975 *
2976 * This function will pull out a page from the ring buffer and consume it.
2977 * @data_page must be the address of the variable that was returned
2978 * from ring_buffer_alloc_read_page. This is because the page might be used
2979 * to swap with a page in the ring buffer.
2980 *
2981 * for example:
Lai Jiangshanb85fa012009-02-09 14:21:14 +08002982 * rpage = ring_buffer_alloc_read_page(buffer);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002983 * if (!rpage)
2984 * return error;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002985 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002986 * if (ret >= 0)
2987 * process_page(rpage, ret);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002988 *
 2989 * When @full is set, the read will not succeed unless the writer
 2990 * is off the reader page.
2991 *
2992 * Note: it is up to the calling functions to handle sleeps and wakeups.
2993 * The ring buffer can be used anywhere in the kernel and can not
2994 * blindly call wake_up. The layer that uses the ring buffer must be
2995 * responsible for that.
2996 *
2997 * Returns:
Lai Jiangshan667d2412009-02-09 14:21:17 +08002998 * >=0 if data has been transferred, returns the offset of consumed data.
2999 * <0 if no data has been transferred.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003000 */
3001int ring_buffer_read_page(struct ring_buffer *buffer,
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003002 void **data_page, size_t len, int cpu, int full)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003003{
3004 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3005 struct ring_buffer_event *event;
Steven Rostedt044fa782008-12-02 23:50:03 -05003006 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003007 struct buffer_page *reader;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003008 unsigned long flags;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003009 unsigned int commit;
Lai Jiangshan667d2412009-02-09 14:21:17 +08003010 unsigned int read;
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003011 u64 save_timestamp;
Lai Jiangshan667d2412009-02-09 14:21:17 +08003012 int ret = -1;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003013
Steven Rostedt554f7862009-03-11 22:00:13 -04003014 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3015 goto out;
3016
Steven Rostedt474d32b2009-03-03 19:51:40 -05003017 /*
3018 * If len is not big enough to hold the page header, then
3019 * we can not copy anything.
3020 */
3021 if (len <= BUF_PAGE_HDR_SIZE)
Steven Rostedt554f7862009-03-11 22:00:13 -04003022 goto out;
Steven Rostedt474d32b2009-03-03 19:51:40 -05003023
3024 len -= BUF_PAGE_HDR_SIZE;
3025
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003026 if (!data_page)
Steven Rostedt554f7862009-03-11 22:00:13 -04003027 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003028
Steven Rostedt044fa782008-12-02 23:50:03 -05003029 bpage = *data_page;
3030 if (!bpage)
Steven Rostedt554f7862009-03-11 22:00:13 -04003031 goto out;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003032
3033 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3034
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003035 reader = rb_get_reader_page(cpu_buffer);
3036 if (!reader)
Steven Rostedt554f7862009-03-11 22:00:13 -04003037 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003038
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003039 event = rb_reader_event(cpu_buffer);
Lai Jiangshan667d2412009-02-09 14:21:17 +08003040
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003041 read = reader->read;
3042 commit = rb_page_commit(reader);
3043
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003044 /*
Steven Rostedt474d32b2009-03-03 19:51:40 -05003045 * If this page has been partially read or
3046 * if len is not big enough to read the rest of the page or
3047 * a writer is still on the page, then
3048 * we must copy the data from the page to the buffer.
3049 * Otherwise, we can simply swap the page with the one passed in.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003050 */
Steven Rostedt474d32b2009-03-03 19:51:40 -05003051 if (read || (len < (commit - read)) ||
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003052 cpu_buffer->reader_page == cpu_buffer->commit_page) {
Lai Jiangshan667d2412009-02-09 14:21:17 +08003053 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
Steven Rostedt474d32b2009-03-03 19:51:40 -05003054 unsigned int rpos = read;
3055 unsigned int pos = 0;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003056 unsigned int size;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003057
3058 if (full)
Steven Rostedt554f7862009-03-11 22:00:13 -04003059 goto out_unlock;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003060
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003061 if (len > (commit - read))
3062 len = (commit - read);
3063
3064 size = rb_event_length(event);
3065
3066 if (len < size)
Steven Rostedt554f7862009-03-11 22:00:13 -04003067 goto out_unlock;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003068
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003069 /* save the current timestamp, since the user will need it */
3070 save_timestamp = cpu_buffer->read_stamp;
3071
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003072 /* Need to copy one event at a time */
3073 do {
Steven Rostedt474d32b2009-03-03 19:51:40 -05003074 memcpy(bpage->data + pos, rpage->data + rpos, size);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003075
3076 len -= size;
3077
3078 rb_advance_reader(cpu_buffer);
Steven Rostedt474d32b2009-03-03 19:51:40 -05003079 rpos = reader->read;
3080 pos += size;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003081
3082 event = rb_reader_event(cpu_buffer);
3083 size = rb_event_length(event);
3084 } while (len > size);
Lai Jiangshan667d2412009-02-09 14:21:17 +08003085
3086 /* update bpage */
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003087 local_set(&bpage->commit, pos);
Steven Rostedt4f3640f2009-03-03 23:52:42 -05003088 bpage->time_stamp = save_timestamp;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003089
Steven Rostedt474d32b2009-03-03 19:51:40 -05003090 /* we copied everything to the beginning */
3091 read = 0;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003092 } else {
Steven Rostedtafbab762009-05-01 19:40:05 -04003093 /* update the entry counter */
3094 cpu_buffer->read += local_read(&reader->entries);
3095
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003096 /* swap the pages */
Steven Rostedt044fa782008-12-02 23:50:03 -05003097 rb_init_page(bpage);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003098 bpage = reader->page;
3099 reader->page = *data_page;
3100 local_set(&reader->write, 0);
Steven Rostedt778c55d2009-05-01 18:44:45 -04003101 local_set(&reader->entries, 0);
Steven Rostedtef7a4a12009-03-03 00:27:49 -05003102 reader->read = 0;
Steven Rostedt044fa782008-12-02 23:50:03 -05003103 *data_page = bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003104 }
Lai Jiangshan667d2412009-02-09 14:21:17 +08003105 ret = read;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003106
Steven Rostedt554f7862009-03-11 22:00:13 -04003107 out_unlock:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003108 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3109
Steven Rostedt554f7862009-03-11 22:00:13 -04003110 out:
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003111 return ret;
3112}
Steven Rostedtd6ce96d2009-05-05 01:15:24 -04003113EXPORT_SYMBOL_GPL(ring_buffer_read_page);
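/*
 * A minimal sketch of the full page-read cycle; "process_page()" is a
 * hypothetical helper belonging to the caller:
 *
 *	void *page = ring_buffer_alloc_read_page(buffer);
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(page, ret);
 *	ring_buffer_free_read_page(buffer, page);
 */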
Steven Rostedt8789a9e2008-12-02 15:34:07 -05003114
Paul Mundt1155de42009-06-25 14:30:12 +09003115#ifdef CONFIG_TRACING
Steven Rostedta3583242008-11-11 15:01:42 -05003116static ssize_t
3117rb_simple_read(struct file *filp, char __user *ubuf,
3118 size_t cnt, loff_t *ppos)
3119{
Hannes Eder5e398412009-02-10 19:44:34 +01003120 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05003121 char buf[64];
3122 int r;
3123
Steven Rostedt033601a2008-11-21 12:41:55 -05003124 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3125 r = sprintf(buf, "permanently disabled\n");
3126 else
3127 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
Steven Rostedta3583242008-11-11 15:01:42 -05003128
3129 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3130}
3131
3132static ssize_t
3133rb_simple_write(struct file *filp, const char __user *ubuf,
3134 size_t cnt, loff_t *ppos)
3135{
Hannes Eder5e398412009-02-10 19:44:34 +01003136 unsigned long *p = filp->private_data;
Steven Rostedta3583242008-11-11 15:01:42 -05003137 char buf[64];
Hannes Eder5e398412009-02-10 19:44:34 +01003138 unsigned long val;
Steven Rostedta3583242008-11-11 15:01:42 -05003139 int ret;
3140
3141 if (cnt >= sizeof(buf))
3142 return -EINVAL;
3143
3144 if (copy_from_user(&buf, ubuf, cnt))
3145 return -EFAULT;
3146
3147 buf[cnt] = 0;
3148
3149 ret = strict_strtoul(buf, 10, &val);
3150 if (ret < 0)
3151 return ret;
3152
Steven Rostedt033601a2008-11-21 12:41:55 -05003153 if (val)
3154 set_bit(RB_BUFFERS_ON_BIT, p);
3155 else
3156 clear_bit(RB_BUFFERS_ON_BIT, p);
Steven Rostedta3583242008-11-11 15:01:42 -05003157
3158 (*ppos)++;
3159
3160 return cnt;
3161}
3162
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003163static const struct file_operations rb_simple_fops = {
Steven Rostedta3583242008-11-11 15:01:42 -05003164 .open = tracing_open_generic,
3165 .read = rb_simple_read,
3166 .write = rb_simple_write,
3167};
3168
3169
3170static __init int rb_init_debugfs(void)
3171{
3172 struct dentry *d_tracer;
Steven Rostedta3583242008-11-11 15:01:42 -05003173
3174 d_tracer = tracing_init_dentry();
3175
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003176 trace_create_file("tracing_on", 0644, d_tracer,
3177 &ring_buffer_flags, &rb_simple_fops);
Steven Rostedta3583242008-11-11 15:01:42 -05003178
3179 return 0;
3180}
3181
3182fs_initcall(rb_init_debugfs);
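/*
 * The file created above gates all ring buffer writes from user space.
 * Assuming debugfs is mounted in its usual location, it can be used as:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	(disable recording)
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	(re-enable recording)
 */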
Paul Mundt1155de42009-06-25 14:30:12 +09003183#endif
Steven Rostedt554f7862009-03-11 22:00:13 -04003184
Steven Rostedt59222ef2009-03-12 11:46:03 -04003185#ifdef CONFIG_HOTPLUG_CPU
Frederic Weisbecker09c9e842009-03-21 04:33:36 +01003186static int rb_cpu_notify(struct notifier_block *self,
3187 unsigned long action, void *hcpu)
Steven Rostedt554f7862009-03-11 22:00:13 -04003188{
3189 struct ring_buffer *buffer =
3190 container_of(self, struct ring_buffer, cpu_notify);
3191 long cpu = (long)hcpu;
3192
3193 switch (action) {
3194 case CPU_UP_PREPARE:
3195 case CPU_UP_PREPARE_FROZEN:
Rusty Russell3f237a72009-06-12 21:15:30 +09303196 if (cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04003197 return NOTIFY_OK;
3198
3199 buffer->buffers[cpu] =
3200 rb_allocate_cpu_buffer(buffer, cpu);
3201 if (!buffer->buffers[cpu]) {
3202 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3203 cpu);
3204 return NOTIFY_OK;
3205 }
3206 smp_wmb();
Rusty Russell3f237a72009-06-12 21:15:30 +09303207 cpumask_set_cpu(cpu, buffer->cpumask);
Steven Rostedt554f7862009-03-11 22:00:13 -04003208 break;
3209 case CPU_DOWN_PREPARE:
3210 case CPU_DOWN_PREPARE_FROZEN:
3211 /*
3212 * Do nothing.
3213 * If we were to free the buffer, then the user would
3214 * lose any trace that was in the buffer.
3215 */
3216 break;
3217 default:
3218 break;
3219 }
3220 return NOTIFY_OK;
3221}
3222#endif