#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				writable;	/* are we writable */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	local_t				nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	int				mmap_locked;
	struct user_struct		*mmap_user;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
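
/*
 * Layout note (editorial, inferred from this header): data_pages[0] is
 * an old-style flexible array; rb_alloc() sizes the allocation for
 * nr_pages page pointers.  nr_pages is a power of two, which is what
 * lets __output_copy() below wrap with "page &= nr_pages - 1" instead
 * of a modulo.
 */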

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
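
/*
 * Illustrative pairing (editorial sketch, not lifted from the kernel
 * sources; error handling beyond the NULL check is elided):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, watermark, cpu, RING_BUFFER_WRITABLE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	rb_free(rb);
 */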

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
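
/*
 * Worked example (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages): with
 * nr_pages == 8 and page_order() == 0 this is 8 << 12 == 32 KiB.  With
 * the vmalloc backend a non-zero page_order() scales each data "page"
 * by 2^order, which the shift above accounts for.
 */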

static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
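
/*
 * Worked example (assuming 4 KiB pages, page_order() == 0 and
 * nr_pages == 4): with 1024 bytes left on the current page
 * (handle->size == 1024), copying len == 3000 first writes those 1024
 * bytes, wraps handle->page through the nr_pages - 1 mask (page 3
 * wraps to page 0), then writes the remaining 1976 bytes into the
 * next page.
 */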

/* Callchain handling */
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
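
/*
 * Sketch of the expected pairing (editorial; the 0-on-success
 * convention is an assumption read off the int return type, not
 * spelled out in this header):
 *
 *	if (get_callchain_buffers())
 *		return -ENOMEM;
 *	...
 *	put_callchain_buffers();
 */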

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
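
/*
 * Usage sketch (illustrative; "recursion" stands in for the caller's
 * per-CPU int[4], which is not declared in this header):
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 *
 * The four slots (task, softirq, hardirq, NMI) keep an event firing in
 * a deeper context from clobbering the state of a shallower one.
 */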

#endif /* _KERNEL_EVENTS_INTERNAL_H */