#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

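/*
 * Open the session input: a filename of "-" means pipe mode, reading
 * events from stdin; otherwise the regular file is opened and checked
 * (ownership unless forced, non-zero size) before its header is read.
 */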
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format\n");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	struct rb_root *machines = &self->machines;
	int ret = machines__create_kernel_maps(machines, HOST_KERNEL_ID);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(machines);
	return ret;
}

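/*
 * Allocate and initialize a session.  The filename is copied into the
 * flexible array at the end of the struct, the header is initialized and,
 * depending on the mode, the input file is opened (O_RDONLY) or the
 * kernel maps are created up front (O_WRONLY).
 */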
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	self->machines = RB_ROOT;
	self->ordered_samples.flush_limit = ULLONG_MAX;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

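/*
 * Resolve each entry of an ip_callchain to a map/symbol pair, switching
 * cpumode whenever a PERF_CONTEXT_* marker is seen, and record the first
 * symbol that matches the parent regex if one was requested.  Returns a
 * calloc'ed array of chain->nr map_symbol entries, or NULL on allocation
 * failure; the caller owns the array.
 */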
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

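/*
 * Point every handler the tool did not provide at process_event_stub, so
 * that perf_session__process_event() can call them unconditionally.
 */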
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
}

static const char *event__name[] = {
	[0] = "TOTAL",
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR] = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
};

unsigned long event__total[PERF_RECORD_HEADER_MAX];

void event__print_totals(void)
{
	int i;
	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		if (!event__name[i])
			continue;
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
	}
}

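/*
 * When the data file was recorded on a host with the opposite endianness
 * (self->header.needs_swap), every event must be byte-swapped before it is
 * handed to the tool.  The helpers below swap the fixed-layout records
 * field by field and use a blanket 64-bit swap for the rest.
 */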
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type = bswap_32(self->attr.attr.type);
	self->attr.attr.size = bswap_32(self->attr.attr.size);
	self->attr.attr.config = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

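/*
 * Ordered sample queue: when a tool sets ops->ordered_samples, samples are
 * buffered here and re-sorted by timestamp before being delivered, since
 * the per-cpu mmap buffers are only weakly ordered with respect to each
 * other.  Entries are flushed once they are older than the current
 * flush_limit (see FLUSH_PERIOD below).
 */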
struct sample_queue {
	u64 timestamp;
	struct sample_event *event;
	struct list_head list;
};

#define FLUSH_PERIOD (2 * NSEC_PER_SEC)

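/*
 * Deliver every queued sample whose timestamp is not newer than
 * flush_limit to ops->sample(), oldest first, and release it.
 */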
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.flush_limit;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp very close to
	 * that of the last event inserted, unless we just switched to
	 * another event buffer.  Starting the insertion from the last
	 * inserted entry of a plain list is therefore probably more
	 * efficient than an rbtree based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

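/*
 * Copy the sample into a sample_queue node and insert it in timestamp
 * order.  The first sample seen establishes flush_limit; once a new
 * timestamp is more than FLUSH_PERIOD past that limit, the limit advances
 * and the older half of the window is flushed (see the comment below on
 * the 2 * FLUSH_PERIOD slice).
 */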
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s,
			      struct perf_event_ops *ops)
{
	u64 timestamp = data->time;
	struct sample_queue *new;
	u64 flush_limit;

	if (s->ordered_samples.flush_limit == ULLONG_MAX)
		s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	/*
	 * We want to have a slice of events covering 2 * FLUSH_PERIOD.
	 * If FLUSH_PERIOD is big enough, it ensures that every event that
	 * occurred in the first half of the timeslice has been buffered and
	 * that none are still pending (we need that because of the weakly
	 * ordered event recording we have).  Once we reach the
	 * 2 * FLUSH_PERIOD timeslice, we flush the first half to be gentle
	 * with memory (the second half can still receive new events in the
	 * middle, so wait another period before flushing it).
	 */
	flush_limit = s->ordered_samples.flush_limit;

	if (new->timestamp > flush_limit &&
	    new->timestamp - flush_limit > FLUSH_PERIOD) {
		s->ordered_samples.flush_limit += FLUSH_PERIOD;
		flush_sample_queue(s, ops);
	}

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	queue_sample_event(event, &data, s, ops);

	return 0;
}

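/*
 * Central dispatcher: account the event, byte-swap it if the file was
 * recorded on a host with different endianness, then hand it to the
 * matching handler in ops.  Unknown types are counted and reported as an
 * error to the caller.
 */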
static int perf_session__process_event(struct perf_session *self,
					event_t *event,
					struct perf_event_ops *ops,
					u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

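/*
 * Read exactly 'size' bytes from fd, retrying short reads.  Returns 'size'
 * on success, or the (<= 0) result of read() if the stream ends or errors
 * out first.
 */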
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done() (*(volatile int *)(&session_done))
volatile int session_done;

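/*
 * Event loop for pipe input (perf data read from stdin): since we cannot
 * mmap or seek backwards, each event header is read with do_read(),
 * followed by the rest of the record, before it is dispatched.  Processing
 * stops at EOF or when session_done() is set.
 */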
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	err = do_read(self->fd, p, size - sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0) {
			pr_err("unexpected end of event stream\n");
			goto done;
		}

		pr_err("failed to read event data\n");
		goto out_err;
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again
		 * 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

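/*
 * Event loop for regular files: the data area is mmap'ed in windows of
 * mmap_window pages and remapped as the read position crosses a window
 * boundary.  A writable private mapping is used when the file needs
 * byte-swapping so events can be modified in place.  Queued ordered
 * samples are flushed once the whole data area has been consumed.
 */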
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again
		 * 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.flush_limit = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

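/*
 * Entry point used by the tools: register the synthetic idle thread,
 * record the current working directory (unless symbol_conf.full_paths is
 * set) and then run either the pipe or the mmap based event loop,
 * depending on how the session was opened.
 */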
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

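/*
 * Remember a reference relocation symbol (name and address) for the kernel
 * maps, so symbol resolution can later compensate for a relocated kernel.
 * The name is duplicated, truncated at the first ']' if one is present,
 * and shared by every map type via its kmap.
 */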
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}