#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	local_t				nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

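/*
 * Ring-buffer allocation and teardown.  The output copy path below wraps
 * pages with "handle->page &= rb->nr_pages - 1", so nr_pages is assumed to
 * be a power of two.
 */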
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

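/*
 * Total size of the data area in bytes: nr_pages pages, each of
 * PAGE_SIZE << page_order(rb) bytes (page_order() is non-zero only for
 * vmalloc-backed buffers).
 */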
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

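/*
 * DEFINE_OUTPUT_COPY() generates a copy routine that feeds @len bytes from
 * @buf into the ring buffer through @handle, advancing to the next data
 * page (and wrapping) whenever the current page fills up.  @memcpy_func is
 * expected to return the number of bytes it handled; the generated function
 * returns the number of bytes that could not be copied.
 */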
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)	\
static inline unsigned int	\
func_name(struct perf_output_handle *handle,	\
	  const void *buf, unsigned int len)	\
{	\
	unsigned long size, written;	\
	\
	do {	\
		size = min_t(unsigned long, handle->size, len);	\
	\
		written = memcpy_func(handle->addr, buf, size);	\
	\
		len -= written;	\
		handle->addr += written;	\
		buf += written;	\
		handle->size -= written;	\
		if (!handle->size) {	\
			struct ring_buffer *rb = handle->rb;	\
	\
			handle->page++;	\
			handle->page &= rb->nr_pages - 1;	\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}	\
	} while (len && written == size);	\
	\
	return len;	\
}

static inline int memcpy_common(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return n;
}

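/* __output_copy(): plain copy from a kernel buffer into the ring buffer. */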
DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

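/* MEMCPY_SKIP() copies nothing: __output_skip() only advances the handle by @len. */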
#define MEMCPY_SKIP(dst, src, n) (n)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

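/*
 * __output_copy_user(): copy from user space into the ring buffer.
 * Architectures may supply their own arch_perf_out_copy_user primitive;
 * the default is __copy_from_user_inatomic().
 */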
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

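/*
 * Recursion protection, one counter per context: task (0), softirq (1),
 * hardirq (2) and NMI (3).  get_recursion_context() claims the slot for the
 * current context and returns its index, or -1 if we are already nested in
 * that context.  Sketch of the expected calling pattern (the per-cpu
 * "recursion" array is owned by the caller):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			// already active in this context
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */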
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

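/*
 * User stack dump support: when the architecture selects
 * CONFIG_HAVE_PERF_USER_STACK_DUMP, perf_user_stack_pointer() yields the
 * user stack pointer from the register set; otherwise it evaluates to 0.
 */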
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */