/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
                              unsigned long offset, unsigned long head)
{
        unsigned long mask;

        if (!rb->writable)
                return true;

        mask = perf_data_size(rb) - 1;

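        /*
         * Work modulo the buffer size: measure how far the current write
         * offset and the proposed new head are ahead of the reader's tail.
         * If adding this record would push the head past data the reader
         * has not consumed yet, report that there is no space.
         */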
        offset = (offset - tail) & mask;
        head = (head - tail) & mask;

        if ((int)(head - offset) < 0)
                return false;

        return true;
}

static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, POLL_IN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;

        /*
         * Publish the known good head. Rely on the full barrier implied
         * by local_dec_and_test() to order the rb->head read and this
         * write.
         */
        rb->user_page->data_head = head;

        /*
         * Now check if we missed an update; rely on the (compiler)
         * barrier in local_dec_and_test() to re-read rb->head.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size,
                      int sample)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost;
        struct perf_sample_data sample_data;
        struct {
                struct perf_event_header header;
                u64 id;
                u64 lost;
        } lost_event;

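        /*
         * Pin event->rb for the duration of the output: the buffer pointer
         * is published with RCU and only freed after a grace period, so it
         * cannot disappear before the matching rcu_read_unlock() in
         * perf_output_end() (or the error path below).
         */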
        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        rb = rcu_dereference(event->rb);
        if (!rb)
                goto out;

        handle->rb = rb;
        handle->event = event;
        handle->sample = sample;

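        /* A buffer with no data pages (only the control page) can't take output. */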
        if (!rb->nr_pages)
                goto out;

        have_lost = local_read(&rb->lost);
        if (have_lost) {
                lost_event.header.size = sizeof(lost_event);
                perf_event_header__init_id(&lost_event.header, &sample_data,
                                           event);
                size += lost_event.header.size;
        }

        perf_output_get_handle(handle);

        do {
                /*
                 * Userspace could choose to issue an mb() before updating
                 * the tail pointer, so that all reads are completed before
                 * the write is issued.
                 */
                tail = ACCESS_ONCE(rb->user_page->data_tail);
                smp_rmb();
                offset = head = local_read(&rb->head);
                head += size;
                if (unlikely(!perf_output_space(rb, tail, offset, head)))
                        goto fail;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

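        /*
         * Once the head has moved more than a watermark's worth past the
         * last wakeup point, advance rb->wakeup; perf_output_put_handle()
         * compares it against the value snapshotted in the handle and
         * wakes the reader when they differ.
         */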
        if (head - local_read(&rb->wakeup) > rb->watermark)
                local_add(rb->watermark, &rb->wakeup);

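        /*
         * Turn the reserved byte offset into a starting position for the
         * copy: the data page that contains it, the address within that
         * page, and how many bytes remain in that page.  The copy code
         * advances from there page by page.
         */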
        handle->page = offset >> (PAGE_SHIFT + page_order(rb));
        handle->page &= rb->nr_pages - 1;
        handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
        handle->addr = rb->data_pages[handle->page];
        handle->addr += handle->size;
        handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

        if (have_lost) {
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id = event->id;
                lost_event.lost = local_xchg(&rb->lost, 0);

                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}

void perf_output_copy(struct perf_output_handle *handle,
                      const void *buf, unsigned int len)
{
        __output_copy(handle, buf, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
        struct perf_event *event = handle->event;
        struct ring_buffer *rb = handle->rb;

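        /*
         * For sample records, attr.wakeup_events counts records instead of
         * bytes (attr.watermark): every wakeup_events records, bump
         * rb->wakeup so that perf_output_put_handle() signals the reader.
         */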
        if (handle->sample && !event->attr.watermark) {
                int wakeup_events = event->attr.wakeup_events;

                if (wakeup_events) {
                        int events = local_inc_return(&rb->events);
                        if (events >= wakeup_events) {
                                local_sub(wakeup_events, &rb->events);
                                local_inc(&rb->wakeup);
                        }
                }
        }

        perf_output_put_handle(handle);
        rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

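        /*
         * Cap a caller-supplied wakeup watermark at the buffer size and
         * default it to half the buffer when none was requested.
         */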
        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->writable = 1;

        atomic_set(&rb->refcount, 1);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL, order-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

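        /*
         * cpu == -1 means no CPU preference; pass a negative node id and
         * let alloc_pages_node() fall back to the local node.
         */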
        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}

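/*
 * struct ring_buffer ends in a flexible data_pages[] array, so the control
 * structure and the page-pointer table are allocated in one go.
 */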
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else

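/*
 * CONFIG_PERF_USE_VMALLOC: the user page and the data area are one
 * contiguous vmalloc() allocation, so a pgoff simply indexes PAGE_SIZE
 * chunks from the start of that area.
 */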
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > (1UL << page_order(rb)))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = 1 << page_order(rb);

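        /*
         * Clear page->mapping on the user page and every data page before
         * handing the area back to vmalloc.
         */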
        base = rb->user_page;
        for (i = 0; i < nr + 1; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}

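/*
 * rb_free() may be called from contexts where vfree() is not allowed (for
 * example an RCU callback), so the actual teardown is deferred to the
 * work item initialised in rb_alloc() below.
 */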
void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;

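        /*
         * The first PAGE_SIZE bytes are the user control page; the rest is
         * exposed as a single data "page" of order ilog2(nr_pages), which
         * is why nr_pages is recorded as 1 here.
         */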
        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
        rb->page_order = ilog2(nr_pages);
        rb->nr_pages = 1;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif