blob: 71bc608e0ec6328828030a9a37aef249f19a7726 [file] [log] [blame]
Xiao Guangrongb8f46c52010-02-03 11:53:14 +08001#define _FILE_OFFSET_BITS 64
2
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02003#include <linux/kernel.h>
4
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02005#include <byteswap.h>
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -02006#include <unistd.h>
7#include <sys/types.h>
8
9#include "session.h"
Arnaldo Carvalho de Meloa3286262009-12-14 14:22:59 -020010#include "sort.h"
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020011#include "util.h"
12
13static int perf_session__open(struct perf_session *self, bool force)
14{
15 struct stat input_stat;
16
Tom Zanussi8dc58102010-04-01 23:59:15 -050017 if (!strcmp(self->filename, "-")) {
18 self->fd_pipe = true;
19 self->fd = STDIN_FILENO;
20
21 if (perf_header__read(self, self->fd) < 0)
22 pr_err("incompatible file format");
23
24 return 0;
25 }
26
Xiao Guangrongf887f302010-02-04 16:46:42 +080027 self->fd = open(self->filename, O_RDONLY);
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020028 if (self->fd < 0) {
29 pr_err("failed to open file: %s", self->filename);
30 if (!strcmp(self->filename, "perf.data"))
31 pr_err(" (try 'perf record' first)");
32 pr_err("\n");
33 return -errno;
34 }
35
36 if (fstat(self->fd, &input_stat) < 0)
37 goto out_close;
38
39 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
40 pr_err("file %s not owned by current user or root\n",
41 self->filename);
42 goto out_close;
43 }
44
45 if (!input_stat.st_size) {
46 pr_info("zero-sized file (%s), nothing to do!\n",
47 self->filename);
48 goto out_close;
49 }
50
Tom Zanussi8dc58102010-04-01 23:59:15 -050051 if (perf_header__read(self, self->fd) < 0) {
Arnaldo Carvalho de Melo94c744b2009-12-11 21:24:02 -020052 pr_err("incompatible file format");
53 goto out_close;
54 }
55
56 self->size = input_stat.st_size;
57 return 0;
58
59out_close:
60 close(self->fd);
61 self->fd = -1;
62 return -1;
63}
64
Tom Zanussi8dc58102010-04-01 23:59:15 -050065void perf_session__update_sample_type(struct perf_session *self)
66{
67 self->sample_type = perf_header__sample_type(&self->header);
68}
69
Zhang, Yanmina1645ce2010-04-19 13:32:50 +080070int perf_session__create_kernel_maps(struct perf_session *self)
71{
Arnaldo Carvalho de Melod28c6222010-04-27 21:20:43 -030072 struct rb_root *machines = &self->machines;
73 int ret = machines__create_kernel_maps(machines, HOST_KERNEL_ID);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +080074
Zhang, Yanmina1645ce2010-04-19 13:32:50 +080075 if (ret >= 0)
Arnaldo Carvalho de Melod28c6222010-04-27 21:20:43 -030076 ret = machines__create_guest_kernel_maps(machines);
Zhang, Yanmina1645ce2010-04-19 13:32:50 +080077 return ret;
78}
79
/*
 * Allocate and initialize a perf_session.
 *
 * @filename: input/output file name, copied inline into the trailing
 *            buffer allocated after the struct (len bytes)
 * @mode:     O_RDONLY reads an existing file (opens + parses header);
 *            O_WRONLY is record mode (creates kernel maps up front)
 * @force:    forwarded to perf_session__open() to bypass the file
 *            ownership check
 * @repipe:   pipe-mode repipe flag stored on the session
 *
 * Returns the new session, or NULL on allocation/open failure.
 */
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	/* ULLONG_MAX == "window not armed yet"; set on first queued sample. */
	self->ordered_samples.flush_limit = ULLONG_MAX;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	/* Cache sample_type now that the header (if any) is loaded. */
	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
127
/*
 * Tear down a session created by perf_session__new(): header state,
 * the open fd, the saved cwd, then the session itself (the filename
 * lives inline in the same allocation).
 */
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}
Arnaldo Carvalho de Meloa3286262009-12-14 14:22:59 -0200135
136static bool symbol__match_parent_regex(struct symbol *sym)
137{
138 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
139 return 1;
140
141 return 0;
142}
143
/*
 * Resolve each entry of a raw PERF_SAMPLE_CALLCHAIN to a map+symbol.
 *
 * @thread: thread the sample belongs to
 * @chain:  raw ip array; entries >= PERF_CONTEXT_MAX are context
 *          markers, not addresses
 * @parent: out parameter; receives the first symbol matching the
 *          --parent regex when that sort mode is active
 *
 * Returns a calloc()ed array of chain->nr map_symbol entries owned by
 * the caller (unresolved slots stay zeroed), or NULL on OOM.
 */
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		/*
		 * Context markers switch the cpumode used to resolve the
		 * ips that follow them; they are not addresses themselves.
		 */
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			/* Only the parent was wanted: stop resolving early. */
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200190
/*
 * No-op callback installed by perf_event_ops__fill_defaults() for any
 * handler the tool left NULL; just notes the event in the dump output.
 */
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
197
198static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
199{
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200200 if (handler->sample == NULL)
201 handler->sample = process_event_stub;
202 if (handler->mmap == NULL)
203 handler->mmap = process_event_stub;
204 if (handler->comm == NULL)
205 handler->comm = process_event_stub;
206 if (handler->fork == NULL)
207 handler->fork = process_event_stub;
208 if (handler->exit == NULL)
209 handler->exit = process_event_stub;
210 if (handler->lost == NULL)
211 handler->lost = process_event_stub;
212 if (handler->read == NULL)
213 handler->read = process_event_stub;
214 if (handler->throttle == NULL)
215 handler->throttle = process_event_stub;
216 if (handler->unthrottle == NULL)
217 handler->unthrottle = process_event_stub;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -0500218 if (handler->attr == NULL)
219 handler->attr = process_event_stub;
Tom Zanussicd19a032010-04-01 23:59:20 -0500220 if (handler->event_type == NULL)
221 handler->event_type = process_event_stub;
Tom Zanussi92155452010-04-01 23:59:21 -0500222 if (handler->tracing_data == NULL)
223 handler->tracing_data = process_event_stub;
Tom Zanussic7929e42010-04-01 23:59:22 -0500224 if (handler->build_id == NULL)
225 handler->build_id = process_event_stub;
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200226}
227
/*
 * Printable names for the record types accounted below; indexed by
 * PERF_RECORD_* value, slot 0 holds the grand total.
 */
static const char *event__name[] = {
	[0] = "TOTAL",
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR] = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
};

/* Per-type event counters; same indexing as event__name[], [0] = total. */
unsigned long event__total[PERF_RECORD_HEADER_MAX];
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200246
247void event__print_totals(void)
248{
249 int i;
Tom Zanussi8dc58102010-04-01 23:59:15 -0500250 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
251 if (!event__name[i])
252 continue;
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200253 pr_info("%10s events: %10ld\n",
254 event__name[i], event__total[i]);
Tom Zanussi8dc58102010-04-01 23:59:15 -0500255 }
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200256}
257
/*
 * Byte-swap @byte_size bytes at @src as a sequence of 64-bit words.
 * @byte_size is expected to be a multiple of 8 — TODO confirm all
 * callers guarantee this; a trailing partial word would be over-read.
 */
void mem_bswap_64(void *src, int byte_size)
{
	uint64_t *p;

	for (p = src; byte_size > 0; byte_size -= sizeof(*p), p++)
		*p = bswap_64(*p);
}
268
269static void event__all64_swap(event_t *self)
270{
271 struct perf_event_header *hdr = &self->header;
272 mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
273}
274
275static void event__comm_swap(event_t *self)
276{
277 self->comm.pid = bswap_32(self->comm.pid);
278 self->comm.tid = bswap_32(self->comm.tid);
279}
280
281static void event__mmap_swap(event_t *self)
282{
283 self->mmap.pid = bswap_32(self->mmap.pid);
284 self->mmap.tid = bswap_32(self->mmap.tid);
285 self->mmap.start = bswap_64(self->mmap.start);
286 self->mmap.len = bswap_64(self->mmap.len);
287 self->mmap.pgoff = bswap_64(self->mmap.pgoff);
288}
289
290static void event__task_swap(event_t *self)
291{
292 self->fork.pid = bswap_32(self->fork.pid);
293 self->fork.tid = bswap_32(self->fork.tid);
294 self->fork.ppid = bswap_32(self->fork.ppid);
295 self->fork.ptid = bswap_32(self->fork.ptid);
296 self->fork.time = bswap_64(self->fork.time);
297}
298
299static void event__read_swap(event_t *self)
300{
301 self->read.pid = bswap_32(self->read.pid);
302 self->read.tid = bswap_32(self->read.tid);
303 self->read.value = bswap_64(self->read.value);
304 self->read.time_enabled = bswap_64(self->read.time_enabled);
305 self->read.time_running = bswap_64(self->read.time_running);
306 self->read.id = bswap_64(self->read.id);
307}
308
/*
 * Byte-swap a synthesized PERF_RECORD_HEADER_ATTR event: the fixed
 * perf_event_attr fields first, then the variable-length id array
 * that follows it (everything from &self->attr.id to header.size).
 */
static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type = bswap_32(self->attr.attr.type);
	self->attr.attr.size = bswap_32(self->attr.attr.size);
	self->attr.attr.config = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);

	/* The trailing id[] array is all u64s: swap it wholesale. */
	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}
328
Tom Zanussicd19a032010-04-01 23:59:20 -0500329static void event__event_type_swap(event_t *self)
330{
331 self->event_type.event_type.event_id =
332 bswap_64(self->event_type.event_type.event_id);
333}
334
Tom Zanussi92155452010-04-01 23:59:21 -0500335static void event__tracing_data_swap(event_t *self)
336{
337 self->tracing_data.size = bswap_32(self->tracing_data.size);
338}
339
typedef void (*event__swap_op)(event_t *self);

/*
 * Byte-swap handlers indexed by PERF_RECORD_* type, used when the data
 * file was produced on a host of the opposite endianness.  NULL slots
 * either have no payload needing a swap or are handled elsewhere.
 */
static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};
356
/*
 * One buffered PERF_RECORD_SAMPLE awaiting time-ordered delivery.
 * ->event points at a private copy owned (and later freed) by the
 * queue, linked into ordered_samples.samples_head by timestamp.
 */
struct sample_queue {
	u64 timestamp;
	struct sample_event *event;
	struct list_head list;
};

/* Width of one reordering timeslice; see queue_sample_event(). */
#define FLUSH_PERIOD (2 * NSEC_PER_SEC)
364
/*
 * Deliver, in timestamp order, every queued sample that is not newer
 * than the session's flush_limit, then free each node and its event
 * copy.  No-op unless the tool requested ordered samples.
 */
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.flush_limit;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		/* List is time ordered: first too-new entry ends the scan. */
		if (iter->timestamp > limit)
			return;

		/* Don't leave a dangling insertion hint behind. */
		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}
390
391static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
392{
393 struct sample_queue *iter;
394
395 list_for_each_entry_reverse(iter, head, list) {
396 if (iter->timestamp < new->timestamp) {
397 list_add(&new->list, &iter->list);
398 return;
399 }
400 }
401
402 list_add(&new->list, head);
403}
404
405static void __queue_sample_before(struct sample_queue *new,
406 struct sample_queue *iter,
407 struct list_head *head)
408{
409 list_for_each_entry_continue_reverse(iter, head, list) {
410 if (iter->timestamp < new->timestamp) {
411 list_add(&new->list, &iter->list);
412 return;
413 }
414 }
415
416 list_add(&new->list, head);
417}
418
419static void __queue_sample_after(struct sample_queue *new,
420 struct sample_queue *iter,
421 struct list_head *head)
422{
423 list_for_each_entry_continue(iter, head, list) {
424 if (iter->timestamp > new->timestamp) {
425 list_add_tail(&new->list, &iter->list);
426 return;
427 }
428 }
429 list_add_tail(&new->list, head);
430}
431
432/* The queue is ordered by time */
433static void __queue_sample_event(struct sample_queue *new,
434 struct perf_session *s)
435{
436 struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
437 struct list_head *head = &s->ordered_samples.samples_head;
438
439
440 if (!last_inserted) {
441 __queue_sample_end(new, head);
442 return;
443 }
444
445 /*
446 * Most of the time the current event has a timestamp
447 * very close to the last event inserted, unless we just switched
448 * to another event buffer. Having a sorting based on a list and
449 * on the last inserted event that is close to the current one is
450 * probably more efficient than an rbtree based sorting.
451 */
452 if (last_inserted->timestamp >= new->timestamp)
453 __queue_sample_before(new, last_inserted, head);
454 else
455 __queue_sample_after(new, last_inserted, head);
456}
457
/*
 * Copy @event into the time-ordered sample queue.  Samples are buffered
 * over a 2 * FLUSH_PERIOD window; when the window is exceeded the older
 * half is flushed, so weakly-ordered per-cpu buffers can be merged back
 * into timestamp order.
 *
 * Returns 0 on success, -EINVAL when the timestamp precedes the last
 * flush (too old to reorder), or -ENOMEM.
 */
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s,
			      struct perf_event_ops *ops)
{
	u64 timestamp = data->time;
	struct sample_queue *new;
	u64 flush_limit;


	/* First sample ever: arm the flush window from its timestamp. */
	if (s->ordered_samples.flush_limit == ULLONG_MAX)
		s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	/* Private copy: the source buffer may be unmapped/overwritten. */
	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	/*
	 * We want to have a slice of events covering 2 * FLUSH_PERIOD
	 * If FLUSH_PERIOD is big enough, it ensures every events that occured
	 * in the first half of the timeslice have all been buffered and there
	 * are none remaining (we need that because of the weakly ordered
	 * event recording we have). Then once we reach the 2 * FLUSH_PERIOD
	 * timeslice, we flush the first half to be gentle with the memory
	 * (the second half can still get new events in the middle, so wait
	 * another period to flush it)
	 */
	flush_limit = s->ordered_samples.flush_limit;

	if (new->timestamp > flush_limit &&
	    new->timestamp - flush_limit > FLUSH_PERIOD) {
		s->ordered_samples.flush_limit += FLUSH_PERIOD;
		flush_sample_queue(s, ops);
	}

	return 0;
}
512
513static int perf_session__process_sample(event_t *event, struct perf_session *s,
514 struct perf_event_ops *ops)
515{
516 struct sample_data data;
517
518 if (!ops->ordered_samples)
519 return ops->sample(event, s);
520
521 bzero(&data, sizeof(struct sample_data));
522 event__parse_sample(event, s->sample_type, &data);
523
524 queue_sample_event(event, &data, s, ops);
525
526 return 0;
527}
528
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200529static int perf_session__process_event(struct perf_session *self,
530 event_t *event,
531 struct perf_event_ops *ops,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -0200532 u64 offset, u64 head)
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200533{
534 trace_event(event);
535
Tom Zanussi8dc58102010-04-01 23:59:15 -0500536 if (event->header.type < PERF_RECORD_HEADER_MAX) {
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -0200537 dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
Arnaldo Carvalho de Melo0d755032010-01-14 12:23:09 -0200538 offset + head, event->header.size,
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200539 event__name[event->header.type]);
540 ++event__total[0];
541 ++event__total[event->header.type];
542 }
543
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -0200544 if (self->header.needs_swap && event__swap_ops[event->header.type])
545 event__swap_ops[event->header.type](event);
546
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200547 switch (event->header.type) {
548 case PERF_RECORD_SAMPLE:
Frederic Weisbeckerc61e52e2010-04-24 00:04:12 +0200549 return perf_session__process_sample(event, self, ops);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200550 case PERF_RECORD_MMAP:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200551 return ops->mmap(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200552 case PERF_RECORD_COMM:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200553 return ops->comm(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200554 case PERF_RECORD_FORK:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200555 return ops->fork(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200556 case PERF_RECORD_EXIT:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200557 return ops->exit(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200558 case PERF_RECORD_LOST:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200559 return ops->lost(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200560 case PERF_RECORD_READ:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200561 return ops->read(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200562 case PERF_RECORD_THROTTLE:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200563 return ops->throttle(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200564 case PERF_RECORD_UNTHROTTLE:
Arnaldo Carvalho de Melo55aa6402009-12-27 21:37:05 -0200565 return ops->unthrottle(event, self);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -0500566 case PERF_RECORD_HEADER_ATTR:
567 return ops->attr(event, self);
Tom Zanussicd19a032010-04-01 23:59:20 -0500568 case PERF_RECORD_HEADER_EVENT_TYPE:
569 return ops->event_type(event, self);
Tom Zanussi92155452010-04-01 23:59:21 -0500570 case PERF_RECORD_HEADER_TRACING_DATA:
571 /* setup for reading amidst mmap */
572 lseek(self->fd, offset + head, SEEK_SET);
573 return ops->tracing_data(event, self);
Tom Zanussic7929e42010-04-01 23:59:22 -0500574 case PERF_RECORD_HEADER_BUILD_ID:
575 return ops->build_id(event, self);
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200576 default:
Arnaldo Carvalho de Melo31d337c2009-12-27 21:37:03 -0200577 self->unknown_events++;
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200578 return -1;
579 }
580}
581
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -0200582void perf_event_header__bswap(struct perf_event_header *self)
583{
584 self->type = bswap_32(self->type);
585 self->misc = bswap_16(self->misc);
586 self->size = bswap_16(self->size);
587}
588
Arnaldo Carvalho de Melo06aae592009-12-27 21:36:59 -0200589static struct thread *perf_session__register_idle_thread(struct perf_session *self)
590{
591 struct thread *thread = perf_session__findnew(self, 0);
592
593 if (thread == NULL || thread__set_comm(thread, "swapper")) {
594 pr_err("problem inserting idle task.\n");
595 thread = NULL;
596 }
597
598 return thread;
599}
600
/*
 * Read exactly @size bytes from @fd, retrying on short reads.
 * Returns the byte count on success, 0 on EOF, or read()'s negative
 * result on error (any partial progress is discarded in those cases).
 */
int do_read(int fd, void *buf, size_t size)
{
	char *p = buf;
	size_t left = size;

	while (left) {
		int ret = read(fd, p, left);

		if (ret <= 0)
			return ret;

		left -= ret;
		p += ret;
	}

	return p - (char *)buf;
}
617
/*
 * Stop flag polled by the pipe event loop below; presumably raised
 * asynchronously (e.g. from a signal handler) — confirm at call sites.
 * The macro forces a volatile re-read on every check.
 */
#define session_done() (*(volatile int *)(&session_done))
volatile int session_done;
620
/*
 * Event loop for pipe mode (self->fd is stdin/a pipe): read one header
 * plus payload at a time with do_read() and dispatch it.  Runs until
 * EOF or until session_done() is raised.
 *
 * Returns 0 on success/EOF, negative on a read error.
 */
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	/*
	 * Read the payload just past the header inside the on-stack
	 * event.  NOTE(review): nothing bounds size against
	 * sizeof(event); an oversized header.size would overflow the
	 * stack buffer — confirm what limits the producer enforces.
	 */
	p = &event;
	p += sizeof(struct perf_event_header);

	err = do_read(self->fd, p, size - sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0) {
			pr_err("unexpected end of event stream\n");
			goto done;
		}

		pr_err("failed to read event data\n");
		goto out_err;
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	/* Handlers may report extra bytes they consumed themselves. */
	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}
695
/*
 * Event loop for file mode: maps mmap_window pages of the file at a
 * time, dispatches each record, and slides the mapping forward (page
 * aligned) whenever the next record would cross the window's end.
 *
 * @data_offset/@data_size: bounds of the data section from the header
 * @file_size: total file size, terminates the scan
 *
 * Returns 0 on success, negative on error.  Any remaining time-ordered
 * samples are flushed before returning.
 */
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	/* Page-align the mapping; head is the offset inside the window. */
	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	/*
	 * Cross-endian files are swapped in place, so they need a
	 * writable private mapping instead of a read-only shared one.
	 */
	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	/* Record straddles the window: slide the mapping and retry. */
	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.flush_limit = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -0200796
/*
 * Public entry point: register the idle thread, capture the cwd (used
 * to relativize paths unless --full-paths), then run the mmap-based or
 * pipe-based event loop depending on how the session was opened.
 *
 * Returns 0 on success or a negative errno-style value.
 */
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
/* Shared error path for both getcwd() and strdup() failures. */
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}
832
Arnaldo Carvalho de Melod549c7692009-12-27 21:37:02 -0200833bool perf_session__has_traces(struct perf_session *self, const char *msg)
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -0200834{
835 if (!(self->sample_type & PERF_SAMPLE_RAW)) {
Arnaldo Carvalho de Melod549c7692009-12-27 21:37:02 -0200836 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
837 return false;
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -0200838 }
839
Arnaldo Carvalho de Melod549c7692009-12-27 21:37:02 -0200840 return true;
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -0200841}
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -0200842
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800843int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -0200844 const char *symbol_name,
845 u64 addr)
846{
847 char *bracket;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -0200848 enum map_type i;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800849 struct ref_reloc_sym *ref;
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -0200850
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800851 ref = zalloc(sizeof(struct ref_reloc_sym));
852 if (ref == NULL)
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -0200853 return -ENOMEM;
854
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800855 ref->name = strdup(symbol_name);
856 if (ref->name == NULL) {
857 free(ref);
858 return -ENOMEM;
859 }
860
861 bracket = strchr(ref->name, ']');
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -0200862 if (bracket)
863 *bracket = '\0';
864
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800865 ref->addr = addr;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -0200866
867 for (i = 0; i < MAP__NR_TYPES; ++i) {
Zhang, Yanmina1645ce2010-04-19 13:32:50 +0800868 struct kmap *kmap = map__kmap(maps[i]);
869 kmap->ref_reloc_sym = ref;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -0200870 }
871
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -0200872 return 0;
873}
Arnaldo Carvalho de Melo1f626bc2010-05-09 19:57:08 -0300874
875size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
876{
877 return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
878 __dsos__fprintf(&self->host_machine.user_dsos, fp) +
879 machines__fprintf_dsos(&self->machines, fp);
880}