#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

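/* rb_alloc() flag: the buffer's user-space mapping is writable */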
#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

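/*
 * Translate a page offset of the mmap()'ed area into its backing
 * struct page; offset 0 is the user control page, the data pages
 * follow it.
 */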
extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

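/*
 * Total size of the data area in bytes; with CONFIG_PERF_USE_VMALLOC
 * the pages come from one higher-order allocation, which page_order()
 * accounts for.
 */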
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

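/*
 * Copy @len bytes from @buf into the buffer at the handle's current
 * write offset, page by page; handle->page wraps via the mask, which
 * relies on nr_pages being a power of two.
 */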
static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}

/* Callchain handling */
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

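/*
 * Claim the recursion slot for the current context: 0 == task,
 * 1 == softirq, 2 == hardirq, 3 == NMI.  Returns the slot index,
 * or -1 if that context is already active and we would recurse.
 */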
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
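
/*
 * Minimal usage sketch, assuming a caller-provided "int recursion[4]"
 * array (the real callers live elsewhere in kernel/events/):
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		(already active in this context)
 *	... do the work that must not recurse ...
 *	put_recursion_context(recursion, rctx);
 */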

#endif /* _KERNEL_EVENTS_INTERNAL_H */