perf session: Fix double free in perf_data__close
[linux.git] / tools / perf / util / session.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <linux/kernel.h>
5 #include <traceevent/event-parse.h>
6 #include <api/fs/fs.h>
7
8 #include <byteswap.h>
9 #include <unistd.h>
10 #include <sys/types.h>
11 #include <sys/mman.h>
12
13 #include "evlist.h"
14 #include "evsel.h"
15 #include "memswap.h"
16 #include "map.h"
17 #include "symbol.h"
18 #include "session.h"
19 #include "tool.h"
20 #include "sort.h"
21 #include "util.h"
22 #include "cpumap.h"
23 #include "perf_regs.h"
24 #include "asm/bug.h"
25 #include "auxtrace.h"
26 #include "thread.h"
27 #include "thread-stack.h"
28 #include "sample-raw.h"
29 #include "stat.h"
30 #include "arch/common.h"
31
32 static int perf_session__deliver_event(struct perf_session *session,
33                                        union perf_event *event,
34                                        struct perf_tool *tool,
35                                        u64 file_offset);
36
37 static int perf_session__open(struct perf_session *session)
38 {
39         struct perf_data *data = session->data;
40
41         if (perf_session__read_header(session) < 0) {
42                 pr_err("incompatible file format (rerun with -v to learn more)\n");
43                 return -1;
44         }
45
46         if (perf_data__is_pipe(data))
47                 return 0;
48
49         if (perf_header__has_feat(&session->header, HEADER_STAT))
50                 return 0;
51
52         if (!perf_evlist__valid_sample_type(session->evlist)) {
53                 pr_err("non matching sample_type\n");
54                 return -1;
55         }
56
57         if (!perf_evlist__valid_sample_id_all(session->evlist)) {
58                 pr_err("non matching sample_id_all\n");
59                 return -1;
60         }
61
62         if (!perf_evlist__valid_read_format(session->evlist)) {
63                 pr_err("non matching read_format\n");
64                 return -1;
65         }
66
67         return 0;
68 }
69
70 void perf_session__set_id_hdr_size(struct perf_session *session)
71 {
72         u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
73
74         machines__set_id_hdr_size(&session->machines, id_hdr_size);
75 }
76
77 int perf_session__create_kernel_maps(struct perf_session *session)
78 {
79         int ret = machine__create_kernel_maps(&session->machines.host);
80
81         if (ret >= 0)
82                 ret = machines__create_guest_kernel_maps(&session->machines);
83         return ret;
84 }
85
86 static void perf_session__destroy_kernel_maps(struct perf_session *session)
87 {
88         machines__destroy_kernel_maps(&session->machines);
89 }
90
91 static bool perf_session__has_comm_exec(struct perf_session *session)
92 {
93         struct perf_evsel *evsel;
94
95         evlist__for_each_entry(session->evlist, evsel) {
96                 if (evsel->attr.comm_exec)
97                         return true;
98         }
99
100         return false;
101 }
102
103 static void perf_session__set_comm_exec(struct perf_session *session)
104 {
105         bool comm_exec = perf_session__has_comm_exec(session);
106
107         machines__set_comm_exec(&session->machines, comm_exec);
108 }
109
110 static int ordered_events__deliver_event(struct ordered_events *oe,
111                                          struct ordered_event *event)
112 {
113         struct perf_session *session = container_of(oe, struct perf_session,
114                                                     ordered_events);
115
116         return perf_session__deliver_event(session, event->event,
117                                            session->tool, event->file_offset);
118 }
119
120 struct perf_session *perf_session__new(struct perf_data *data,
121                                        bool repipe, struct perf_tool *tool)
122 {
123         struct perf_session *session = zalloc(sizeof(*session));
124
125         if (!session)
126                 goto out;
127
128         session->repipe = repipe;
129         session->tool   = tool;
130         INIT_LIST_HEAD(&session->auxtrace_index);
131         machines__init(&session->machines);
132         ordered_events__init(&session->ordered_events,
133                              ordered_events__deliver_event, NULL);
134
135         if (data) {
136                 if (perf_data__open(data))
137                         goto out_delete;
138
139                 session->data = data;
140
141                 if (perf_data__is_read(data)) {
142                         if (perf_session__open(session) < 0)
143                                 goto out_delete;
144
145                         /*
146                          * set session attributes that are present in perf.data
147                          * but not in pipe-mode.
148                          */
149                         if (!data->is_pipe) {
150                                 perf_session__set_id_hdr_size(session);
151                                 perf_session__set_comm_exec(session);
152                         }
153
154                         perf_evlist__init_trace_event_sample_raw(session->evlist);
155                 }
156         } else  {
157                 session->machines.host.env = &perf_env;
158         }
159
160         session->machines.host.single_address_space =
161                 perf_env__single_address_space(session->machines.host.env);
162
163         if (!data || perf_data__is_write(data)) {
164                 /*
165                  * In O_RDONLY mode this will be performed when reading the
166                  * kernel MMAP event, in perf_event__process_mmap().
167                  */
168                 if (perf_session__create_kernel_maps(session) < 0)
169                         pr_warning("Cannot read kernel map\n");
170         }
171
172         /*
173          * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
174          * processed, so perf_evlist__sample_id_all is not meaningful here.
175          */
176         if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
177             tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
178                 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
179                 tool->ordered_events = false;
180         }
181
182         return session;
183
184  out_delete:
185         perf_session__delete(session);
186  out:
187         return NULL;
188 }
189
190 static void perf_session__delete_threads(struct perf_session *session)
191 {
192         machine__delete_threads(&session->machines.host);
193 }
194
195 void perf_session__delete(struct perf_session *session)
196 {
197         if (session == NULL)
198                 return;
199         auxtrace__free(session);
200         auxtrace_index__free(&session->auxtrace_index);
201         perf_session__destroy_kernel_maps(session);
202         perf_session__delete_threads(session);
203         perf_env__exit(&session->header.env);
204         machines__exit(&session->machines);
205         if (session->data)
206                 perf_data__close(session->data);
207         free(session);
208 }
209
210 static int process_event_synth_tracing_data_stub(struct perf_session *session
211                                                  __maybe_unused,
212                                                  union perf_event *event
213                                                  __maybe_unused)
214 {
215         dump_printf(": unhandled!\n");
216         return 0;
217 }
218
219 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
220                                          union perf_event *event __maybe_unused,
221                                          struct perf_evlist **pevlist
222                                          __maybe_unused)
223 {
224         dump_printf(": unhandled!\n");
225         return 0;
226 }
227
228 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
229                                                  union perf_event *event __maybe_unused,
230                                                  struct perf_evlist **pevlist
231                                                  __maybe_unused)
232 {
233         if (dump_trace)
234                 perf_event__fprintf_event_update(event, stdout);
235
236         dump_printf(": unhandled!\n");
237         return 0;
238 }
239
240 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
241                                      union perf_event *event __maybe_unused,
242                                      struct perf_sample *sample __maybe_unused,
243                                      struct perf_evsel *evsel __maybe_unused,
244                                      struct machine *machine __maybe_unused)
245 {
246         dump_printf(": unhandled!\n");
247         return 0;
248 }
249
250 static int process_event_stub(struct perf_tool *tool __maybe_unused,
251                               union perf_event *event __maybe_unused,
252                               struct perf_sample *sample __maybe_unused,
253                               struct machine *machine __maybe_unused)
254 {
255         dump_printf(": unhandled!\n");
256         return 0;
257 }
258
259 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
260                                        union perf_event *event __maybe_unused,
261                                        struct ordered_events *oe __maybe_unused)
262 {
263         dump_printf(": unhandled!\n");
264         return 0;
265 }
266
267 static int process_finished_round(struct perf_tool *tool,
268                                   union perf_event *event,
269                                   struct ordered_events *oe);
270
271 static int skipn(int fd, off_t n)
272 {
273         char buf[4096];
274         ssize_t ret;
275
276         while (n > 0) {
277                 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
278                 if (ret <= 0)
279                         return ret;
280                 n -= ret;
281         }
282
283         return 0;
284 }
285
286 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
287                                        union perf_event *event)
288 {
289         dump_printf(": unhandled!\n");
290         if (perf_data__is_pipe(session->data))
291                 skipn(perf_data__fd(session->data), event->auxtrace.size);
292         return event->auxtrace.size;
293 }
294
295 static int process_event_op2_stub(struct perf_session *session __maybe_unused,
296                                   union perf_event *event __maybe_unused)
297 {
298         dump_printf(": unhandled!\n");
299         return 0;
300 }
301
302
303 static
304 int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
305                                   union perf_event *event __maybe_unused)
306 {
307         if (dump_trace)
308                 perf_event__fprintf_thread_map(event, stdout);
309
310         dump_printf(": unhandled!\n");
311         return 0;
312 }
313
314 static
315 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
316                                union perf_event *event __maybe_unused)
317 {
318         if (dump_trace)
319                 perf_event__fprintf_cpu_map(event, stdout);
320
321         dump_printf(": unhandled!\n");
322         return 0;
323 }
324
325 static
326 int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
327                                    union perf_event *event __maybe_unused)
328 {
329         if (dump_trace)
330                 perf_event__fprintf_stat_config(event, stdout);
331
332         dump_printf(": unhandled!\n");
333         return 0;
334 }
335
336 static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
337                              union perf_event *event)
338 {
339         if (dump_trace)
340                 perf_event__fprintf_stat(event, stdout);
341
342         dump_printf(": unhandled!\n");
343         return 0;
344 }
345
346 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
347                                    union perf_event *event)
348 {
349         if (dump_trace)
350                 perf_event__fprintf_stat_round(event, stdout);
351
352         dump_printf(": unhandled!\n");
353         return 0;
354 }
355
356 void perf_tool__fill_defaults(struct perf_tool *tool)
357 {
358         if (tool->sample == NULL)
359                 tool->sample = process_event_sample_stub;
360         if (tool->mmap == NULL)
361                 tool->mmap = process_event_stub;
362         if (tool->mmap2 == NULL)
363                 tool->mmap2 = process_event_stub;
364         if (tool->comm == NULL)
365                 tool->comm = process_event_stub;
366         if (tool->namespaces == NULL)
367                 tool->namespaces = process_event_stub;
368         if (tool->fork == NULL)
369                 tool->fork = process_event_stub;
370         if (tool->exit == NULL)
371                 tool->exit = process_event_stub;
372         if (tool->lost == NULL)
373                 tool->lost = perf_event__process_lost;
374         if (tool->lost_samples == NULL)
375                 tool->lost_samples = perf_event__process_lost_samples;
376         if (tool->aux == NULL)
377                 tool->aux = perf_event__process_aux;
378         if (tool->itrace_start == NULL)
379                 tool->itrace_start = perf_event__process_itrace_start;
380         if (tool->context_switch == NULL)
381                 tool->context_switch = perf_event__process_switch;
382         if (tool->ksymbol == NULL)
383                 tool->ksymbol = perf_event__process_ksymbol;
384         if (tool->bpf_event == NULL)
385                 tool->bpf_event = perf_event__process_bpf_event;
386         if (tool->read == NULL)
387                 tool->read = process_event_sample_stub;
388         if (tool->throttle == NULL)
389                 tool->throttle = process_event_stub;
390         if (tool->unthrottle == NULL)
391                 tool->unthrottle = process_event_stub;
392         if (tool->attr == NULL)
393                 tool->attr = process_event_synth_attr_stub;
394         if (tool->event_update == NULL)
395                 tool->event_update = process_event_synth_event_update_stub;
396         if (tool->tracing_data == NULL)
397                 tool->tracing_data = process_event_synth_tracing_data_stub;
398         if (tool->build_id == NULL)
399                 tool->build_id = process_event_op2_stub;
400         if (tool->finished_round == NULL) {
401                 if (tool->ordered_events)
402                         tool->finished_round = process_finished_round;
403                 else
404                         tool->finished_round = process_finished_round_stub;
405         }
406         if (tool->id_index == NULL)
407                 tool->id_index = process_event_op2_stub;
408         if (tool->auxtrace_info == NULL)
409                 tool->auxtrace_info = process_event_op2_stub;
410         if (tool->auxtrace == NULL)
411                 tool->auxtrace = process_event_auxtrace_stub;
412         if (tool->auxtrace_error == NULL)
413                 tool->auxtrace_error = process_event_op2_stub;
414         if (tool->thread_map == NULL)
415                 tool->thread_map = process_event_thread_map_stub;
416         if (tool->cpu_map == NULL)
417                 tool->cpu_map = process_event_cpu_map_stub;
418         if (tool->stat_config == NULL)
419                 tool->stat_config = process_event_stat_config_stub;
420         if (tool->stat == NULL)
421                 tool->stat = process_stat_stub;
422         if (tool->stat_round == NULL)
423                 tool->stat_round = process_stat_round_stub;
424         if (tool->time_conv == NULL)
425                 tool->time_conv = process_event_op2_stub;
426         if (tool->feature == NULL)
427                 tool->feature = process_event_op2_stub;
428 }
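/*
 * A minimal usage sketch, where "my_sample" stands in for a hypothetical
 * caller-defined handler:
 *
 *	struct perf_tool tool = {
 *		.sample         = my_sample,
 *		.ordered_events = true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 *
 * After the call, every callback left NULL above points at one of the
 * stubs defined in this file, so the processing code can invoke any of
 * them unconditionally.
 */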
429
430 static void swap_sample_id_all(union perf_event *event, void *data)
431 {
432         void *end = (void *) event + event->header.size;
433         int size = end - data;
434
435         BUG_ON(size % sizeof(u64));
436         mem_bswap_64(data, size);
437 }
438
439 static void perf_event__all64_swap(union perf_event *event,
440                                    bool sample_id_all __maybe_unused)
441 {
442         struct perf_event_header *hdr = &event->header;
443         mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
444 }
445
446 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
447 {
448         event->comm.pid = bswap_32(event->comm.pid);
449         event->comm.tid = bswap_32(event->comm.tid);
450
451         if (sample_id_all) {
452                 void *data = &event->comm.comm;
453
454                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
455                 swap_sample_id_all(event, data);
456         }
457 }
458
459 static void perf_event__mmap_swap(union perf_event *event,
460                                   bool sample_id_all)
461 {
462         event->mmap.pid   = bswap_32(event->mmap.pid);
463         event->mmap.tid   = bswap_32(event->mmap.tid);
464         event->mmap.start = bswap_64(event->mmap.start);
465         event->mmap.len   = bswap_64(event->mmap.len);
466         event->mmap.pgoff = bswap_64(event->mmap.pgoff);
467
468         if (sample_id_all) {
469                 void *data = &event->mmap.filename;
470
471                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
472                 swap_sample_id_all(event, data);
473         }
474 }
475
476 static void perf_event__mmap2_swap(union perf_event *event,
477                                   bool sample_id_all)
478 {
479         event->mmap2.pid   = bswap_32(event->mmap2.pid);
480         event->mmap2.tid   = bswap_32(event->mmap2.tid);
481         event->mmap2.start = bswap_64(event->mmap2.start);
482         event->mmap2.len   = bswap_64(event->mmap2.len);
483         event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
484         event->mmap2.maj   = bswap_32(event->mmap2.maj);
485         event->mmap2.min   = bswap_32(event->mmap2.min);
486         event->mmap2.ino   = bswap_64(event->mmap2.ino);
487
488         if (sample_id_all) {
489                 void *data = &event->mmap2.filename;
490
491                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
492                 swap_sample_id_all(event, data);
493         }
494 }
495 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
496 {
497         event->fork.pid  = bswap_32(event->fork.pid);
498         event->fork.tid  = bswap_32(event->fork.tid);
499         event->fork.ppid = bswap_32(event->fork.ppid);
500         event->fork.ptid = bswap_32(event->fork.ptid);
501         event->fork.time = bswap_64(event->fork.time);
502
503         if (sample_id_all)
504                 swap_sample_id_all(event, &event->fork + 1);
505 }
506
507 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
508 {
509         event->read.pid          = bswap_32(event->read.pid);
510         event->read.tid          = bswap_32(event->read.tid);
511         event->read.value        = bswap_64(event->read.value);
512         event->read.time_enabled = bswap_64(event->read.time_enabled);
513         event->read.time_running = bswap_64(event->read.time_running);
514         event->read.id           = bswap_64(event->read.id);
515
516         if (sample_id_all)
517                 swap_sample_id_all(event, &event->read + 1);
518 }
519
520 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
521 {
522         event->aux.aux_offset = bswap_64(event->aux.aux_offset);
523         event->aux.aux_size   = bswap_64(event->aux.aux_size);
524         event->aux.flags      = bswap_64(event->aux.flags);
525
526         if (sample_id_all)
527                 swap_sample_id_all(event, &event->aux + 1);
528 }
529
530 static void perf_event__itrace_start_swap(union perf_event *event,
531                                           bool sample_id_all)
532 {
533         event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
534         event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
535
536         if (sample_id_all)
537                 swap_sample_id_all(event, &event->itrace_start + 1);
538 }
539
540 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
541 {
542         if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
543                 event->context_switch.next_prev_pid =
544                                 bswap_32(event->context_switch.next_prev_pid);
545                 event->context_switch.next_prev_tid =
546                                 bswap_32(event->context_switch.next_prev_tid);
547         }
548
549         if (sample_id_all)
550                 swap_sample_id_all(event, &event->context_switch + 1);
551 }
552
553 static void perf_event__throttle_swap(union perf_event *event,
554                                       bool sample_id_all)
555 {
556         event->throttle.time      = bswap_64(event->throttle.time);
557         event->throttle.id        = bswap_64(event->throttle.id);
558         event->throttle.stream_id = bswap_64(event->throttle.stream_id);
559
560         if (sample_id_all)
561                 swap_sample_id_all(event, &event->throttle + 1);
562 }
563
564 static u8 revbyte(u8 b)
565 {
566         int rev = (b >> 4) | ((b & 0xf) << 4);
567         rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
568         rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
569         return (u8) rev;
570 }
571
572 /*
573  * XXX this is a hack in an attempt to carry the flags bitfield
574  * through the endian village. The ABI says:
575  *
576  * Bit-fields are allocated from right to left (least to most significant)
577  * on little-endian implementations and from left to right (most to least
578  * significant) on big-endian implementations.
579  *
580  * The above seems to be byte specific, so we need to reverse each
581  * byte of the bitfield. 'Internet' also says this might be implementation
582  * specific and we probably need a proper fix and carry the perf_event_attr
583  * bitfield flags in a separate FEAT_ data file section. Though this seems
584  * to work for now.
585  */
586 static void swap_bitfield(u8 *p, unsigned len)
587 {
588         unsigned i;
589
590         for (i = 0; i < len; i++) {
591                 *p = revbyte(*p);
592                 p++;
593         }
594 }
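/*
 * For illustration: revbyte() mirrors the bit order within a single byte,
 * e.g. revbyte(0x80) == 0x01 and revbyte(0x01) == 0x80, while values whose
 * bit pattern is a palindrome, such as 0xa5, map to themselves.
 * swap_bitfield() simply applies that reversal to each byte of the area
 * it is given.
 */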
595
596 /* exported for swapping attributes in file header */
597 void perf_event__attr_swap(struct perf_event_attr *attr)
598 {
599         attr->type              = bswap_32(attr->type);
600         attr->size              = bswap_32(attr->size);
601
602 #define bswap_safe(f, n)                                        \
603         (attr->size > (offsetof(struct perf_event_attr, f) +    \
604                        sizeof(attr->f) * (n)))
605 #define bswap_field(f, sz)                      \
606 do {                                            \
607         if (bswap_safe(f, 0))                   \
608                 attr->f = bswap_##sz(attr->f);  \
609 } while(0)
610 #define bswap_field_16(f) bswap_field(f, 16)
611 #define bswap_field_32(f) bswap_field(f, 32)
612 #define bswap_field_64(f) bswap_field(f, 64)
613
614         bswap_field_64(config);
615         bswap_field_64(sample_period);
616         bswap_field_64(sample_type);
617         bswap_field_64(read_format);
618         bswap_field_32(wakeup_events);
619         bswap_field_32(bp_type);
620         bswap_field_64(bp_addr);
621         bswap_field_64(bp_len);
622         bswap_field_64(branch_sample_type);
623         bswap_field_64(sample_regs_user);
624         bswap_field_32(sample_stack_user);
625         bswap_field_32(aux_watermark);
626         bswap_field_16(sample_max_stack);
627
628         /*
629          * The fields after read_format are bitfields. Check read_format
630          * because we cannot use offsetof() on a bitfield.
631          */
632         if (bswap_safe(read_format, 1))
633                 swap_bitfield((u8 *) (&attr->read_format + 1),
634                               sizeof(u64));
635 #undef bswap_field_64
636 #undef bswap_field_32
637 #undef bswap_field
638 #undef bswap_safe
639 }
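/*
 * Note on bswap_safe() above: a field is only swapped when the attr->size
 * recorded in the file is large enough to contain it.  So, for example, a
 * perf.data written by a perf old enough that its perf_event_attr predates
 * sample_max_stack leaves that field untouched here instead of swapping
 * bytes the producer never wrote.
 */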
640
641 static void perf_event__hdr_attr_swap(union perf_event *event,
642                                       bool sample_id_all __maybe_unused)
643 {
644         size_t size;
645
646         perf_event__attr_swap(&event->attr.attr);
647
648         size = event->header.size;
649         size -= (void *)&event->attr.id - (void *)event;
650         mem_bswap_64(event->attr.id, size);
651 }
652
653 static void perf_event__event_update_swap(union perf_event *event,
654                                           bool sample_id_all __maybe_unused)
655 {
656         event->event_update.type = bswap_64(event->event_update.type);
657         event->event_update.id   = bswap_64(event->event_update.id);
658 }
659
660 static void perf_event__event_type_swap(union perf_event *event,
661                                         bool sample_id_all __maybe_unused)
662 {
663         event->event_type.event_type.event_id =
664                 bswap_64(event->event_type.event_type.event_id);
665 }
666
667 static void perf_event__tracing_data_swap(union perf_event *event,
668                                           bool sample_id_all __maybe_unused)
669 {
670         event->tracing_data.size = bswap_32(event->tracing_data.size);
671 }
672
673 static void perf_event__auxtrace_info_swap(union perf_event *event,
674                                            bool sample_id_all __maybe_unused)
675 {
676         size_t size;
677
678         event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
679
680         size = event->header.size;
681         size -= (void *)&event->auxtrace_info.priv - (void *)event;
682         mem_bswap_64(event->auxtrace_info.priv, size);
683 }
684
685 static void perf_event__auxtrace_swap(union perf_event *event,
686                                       bool sample_id_all __maybe_unused)
687 {
688         event->auxtrace.size      = bswap_64(event->auxtrace.size);
689         event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
690         event->auxtrace.reference = bswap_64(event->auxtrace.reference);
691         event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
692         event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
693         event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
694 }
695
696 static void perf_event__auxtrace_error_swap(union perf_event *event,
697                                             bool sample_id_all __maybe_unused)
698 {
699         event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
700         event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
701         event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
702         event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
703         event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
704         event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
705         event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
706         if (event->auxtrace_error.fmt)
707                 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
708 }
709
710 static void perf_event__thread_map_swap(union perf_event *event,
711                                         bool sample_id_all __maybe_unused)
712 {
713         unsigned i;
714
715         event->thread_map.nr = bswap_64(event->thread_map.nr);
716
717         for (i = 0; i < event->thread_map.nr; i++)
718                 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
719 }
720
721 static void perf_event__cpu_map_swap(union perf_event *event,
722                                      bool sample_id_all __maybe_unused)
723 {
724         struct cpu_map_data *data = &event->cpu_map.data;
725         struct cpu_map_entries *cpus;
726         struct cpu_map_mask *mask;
727         unsigned i;
728
729         data->type = bswap_64(data->type);
730
731         switch (data->type) {
732         case PERF_CPU_MAP__CPUS:
733                 cpus = (struct cpu_map_entries *)data->data;
734
735                 cpus->nr = bswap_16(cpus->nr);
736
737                 for (i = 0; i < cpus->nr; i++)
738                         cpus->cpu[i] = bswap_16(cpus->cpu[i]);
739                 break;
740         case PERF_CPU_MAP__MASK:
741                 mask = (struct cpu_map_mask *) data->data;
742
743                 mask->nr = bswap_16(mask->nr);
744                 mask->long_size = bswap_16(mask->long_size);
745
746                 switch (mask->long_size) {
747                 case 4: mem_bswap_32(&mask->mask, mask->nr); break;
748                 case 8: mem_bswap_64(&mask->mask, mask->nr); break;
749                 default:
750                         pr_err("cpu_map swap: unsupported long size\n");
751                 }
752         default:
753                 break;
754         }
755 }
756
757 static void perf_event__stat_config_swap(union perf_event *event,
758                                          bool sample_id_all __maybe_unused)
759 {
760         u64 size;
761
762         size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
763         size += 1; /* nr item itself */
764         mem_bswap_64(&event->stat_config.nr, size);
765 }
766
767 static void perf_event__stat_swap(union perf_event *event,
768                                   bool sample_id_all __maybe_unused)
769 {
770         event->stat.id     = bswap_64(event->stat.id);
771         event->stat.thread = bswap_32(event->stat.thread);
772         event->stat.cpu    = bswap_32(event->stat.cpu);
773         event->stat.val    = bswap_64(event->stat.val);
774         event->stat.ena    = bswap_64(event->stat.ena);
775         event->stat.run    = bswap_64(event->stat.run);
776 }
777
778 static void perf_event__stat_round_swap(union perf_event *event,
779                                         bool sample_id_all __maybe_unused)
780 {
781         event->stat_round.type = bswap_64(event->stat_round.type);
782         event->stat_round.time = bswap_64(event->stat_round.time);
783 }
784
785 typedef void (*perf_event__swap_op)(union perf_event *event,
786                                     bool sample_id_all);
787
788 static perf_event__swap_op perf_event__swap_ops[] = {
789         [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
790         [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
791         [PERF_RECORD_COMM]                = perf_event__comm_swap,
792         [PERF_RECORD_FORK]                = perf_event__task_swap,
793         [PERF_RECORD_EXIT]                = perf_event__task_swap,
794         [PERF_RECORD_LOST]                = perf_event__all64_swap,
795         [PERF_RECORD_READ]                = perf_event__read_swap,
796         [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
797         [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
798         [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
799         [PERF_RECORD_AUX]                 = perf_event__aux_swap,
800         [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
801         [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
802         [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
803         [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
804         [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
805         [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
806         [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
807         [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
808         [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
809         [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
810         [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
811         [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
812         [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
813         [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
814         [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
815         [PERF_RECORD_STAT]                = perf_event__stat_swap,
816         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
817         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
818         [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
819         [PERF_RECORD_HEADER_MAX]          = NULL,
820 };
821
822 /*
823  * When perf record finishes a pass over all the buffers, it records this
824  * pseudo event.
825  * We record the max timestamp t found in pass n.
826  * Assuming these timestamps are monotonic across cpus, we know that if
827  * a buffer still has events with timestamps below t, they will all be
828  * available and read in pass n + 1.
829  * Hence when we start to read pass n + 2, we can safely flush all
830  * events with timestamps below t.
831  *
832  *    ============ PASS n =================
833  *       CPU 0         |   CPU 1
834  *                     |
835  *    cnt1 timestamps  |   cnt2 timestamps
836  *          1          |         2
837  *          2          |         3
838  *          -          |         4  <--- max recorded
839  *
840  *    ============ PASS n + 1 ==============
841  *       CPU 0         |   CPU 1
842  *                     |
843  *    cnt1 timestamps  |   cnt2 timestamps
844  *          3          |         5
845  *          4          |         6
846  *          5          |         7 <---- max recorded
847  *
848  *      Flush all events below timestamp 4
849  *
850  *    ============ PASS n + 2 ==============
851  *       CPU 0         |   CPU 1
852  *                     |
853  *    cnt1 timestamps  |   cnt2 timestamps
854  *          6          |         8
855  *          7          |         9
856  *          -          |         10
857  *
858  *      Flush all events below timestamp 7
859  *      etc...
860  */
861 static int process_finished_round(struct perf_tool *tool __maybe_unused,
862                                   union perf_event *event __maybe_unused,
863                                   struct ordered_events *oe)
864 {
865         if (dump_trace)
866                 fprintf(stdout, "\n");
867         return ordered_events__flush(oe, OE_FLUSH__ROUND);
868 }
869
870 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
871                               u64 timestamp, u64 file_offset)
872 {
873         return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
874 }
875
876 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
877 {
878         struct ip_callchain *callchain = sample->callchain;
879         struct branch_stack *lbr_stack = sample->branch_stack;
880         u64 kernel_callchain_nr = callchain->nr;
881         unsigned int i;
882
883         for (i = 0; i < kernel_callchain_nr; i++) {
884                 if (callchain->ips[i] == PERF_CONTEXT_USER)
885                         break;
886         }
887
888         if ((i != kernel_callchain_nr) && lbr_stack->nr) {
889                 u64 total_nr;
890                 /*
891                  * The LBR callstack only covers the user call chain:
892                  * i is the number of kernel call chain entries,
893                  * 1 is for PERF_CONTEXT_USER.
894                  *
895                  * The user call chain is stored in LBR registers.
896                  * LBRs are register pairs. The caller is stored
897                  * in the "from" register, while the callee is stored
898                  * in the "to" register.
899                  * For example, given the call stack
900                  * "A"->"B"->"C"->"D",
901                  * the LBR registers will record
902                  * "C"->"D", "B"->"C", "A"->"B".
903                  * So only the first "to" register and all "from"
904                  * registers are needed to construct the whole stack.
905                  */
906                 total_nr = i + 1 + lbr_stack->nr + 1;
907                 kernel_callchain_nr = i + 1;
908
909                 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
910
911                 for (i = 0; i < kernel_callchain_nr; i++)
912                         printf("..... %2d: %016" PRIx64 "\n",
913                                i, callchain->ips[i]);
914
915                 printf("..... %2d: %016" PRIx64 "\n",
916                        (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
917                 for (i = 0; i < lbr_stack->nr; i++)
918                         printf("..... %2d: %016" PRIx64 "\n",
919                                (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
920         }
921 }
922
923 static void callchain__printf(struct perf_evsel *evsel,
924                               struct perf_sample *sample)
925 {
926         unsigned int i;
927         struct ip_callchain *callchain = sample->callchain;
928
929         if (perf_evsel__has_branch_callstack(evsel))
930                 callchain__lbr_callstack_printf(sample);
931
932         printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
933
934         for (i = 0; i < callchain->nr; i++)
935                 printf("..... %2d: %016" PRIx64 "\n",
936                        i, callchain->ips[i]);
937 }
938
939 static void branch_stack__printf(struct perf_sample *sample)
940 {
941         uint64_t i;
942
943         printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
944
945         for (i = 0; i < sample->branch_stack->nr; i++) {
946                 struct branch_entry *e = &sample->branch_stack->entries[i];
947
948                 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
949                         i, e->from, e->to,
950                         (unsigned short)e->flags.cycles,
951                         e->flags.mispred ? "M" : " ",
952                         e->flags.predicted ? "P" : " ",
953                         e->flags.abort ? "A" : " ",
954                         e->flags.in_tx ? "T" : " ",
955                         (unsigned)e->flags.reserved);
956         }
957 }
958
959 static void regs_dump__printf(u64 mask, u64 *regs)
960 {
961         unsigned rid, i = 0;
962
963         for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
964                 u64 val = regs[i++];
965
966                 printf(".... %-5s 0x%" PRIx64 "\n",
967                        perf_reg_name(rid), val);
968         }
969 }
970
971 static const char *regs_abi[] = {
972         [PERF_SAMPLE_REGS_ABI_NONE] = "none",
973         [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
974         [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
975 };
976
977 static inline const char *regs_dump_abi(struct regs_dump *d)
978 {
979         if (d->abi > PERF_SAMPLE_REGS_ABI_64)
980                 return "unknown";
981
982         return regs_abi[d->abi];
983 }
984
985 static void regs__printf(const char *type, struct regs_dump *regs)
986 {
987         u64 mask = regs->mask;
988
989         printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
990                type,
991                mask,
992                regs_dump_abi(regs));
993
994         regs_dump__printf(mask, regs->regs);
995 }
996
997 static void regs_user__printf(struct perf_sample *sample)
998 {
999         struct regs_dump *user_regs = &sample->user_regs;
1000
1001         if (user_regs->regs)
1002                 regs__printf("user", user_regs);
1003 }
1004
1005 static void regs_intr__printf(struct perf_sample *sample)
1006 {
1007         struct regs_dump *intr_regs = &sample->intr_regs;
1008
1009         if (intr_regs->regs)
1010                 regs__printf("intr", intr_regs);
1011 }
1012
1013 static void stack_user__printf(struct stack_dump *dump)
1014 {
1015         printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1016                dump->size, dump->offset);
1017 }
1018
1019 static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
1020                                        union perf_event *event,
1021                                        struct perf_sample *sample)
1022 {
1023         u64 sample_type = __perf_evlist__combined_sample_type(evlist);
1024
1025         if (event->header.type != PERF_RECORD_SAMPLE &&
1026             !perf_evlist__sample_id_all(evlist)) {
1027                 fputs("-1 -1 ", stdout);
1028                 return;
1029         }
1030
1031         if ((sample_type & PERF_SAMPLE_CPU))
1032                 printf("%u ", sample->cpu);
1033
1034         if (sample_type & PERF_SAMPLE_TIME)
1035                 printf("%" PRIu64 " ", sample->time);
1036 }
1037
1038 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1039 {
1040         printf("... sample_read:\n");
1041
1042         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1043                 printf("...... time enabled %016" PRIx64 "\n",
1044                        sample->read.time_enabled);
1045
1046         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1047                 printf("...... time running %016" PRIx64 "\n",
1048                        sample->read.time_running);
1049
1050         if (read_format & PERF_FORMAT_GROUP) {
1051                 u64 i;
1052
1053                 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1054
1055                 for (i = 0; i < sample->read.group.nr; i++) {
1056                         struct sample_read_value *value;
1057
1058                         value = &sample->read.group.values[i];
1059                         printf("..... id %016" PRIx64
1060                                ", value %016" PRIx64 "\n",
1061                                value->id, value->value);
1062                 }
1063         } else
1064                 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1065                         sample->read.one.id, sample->read.one.value);
1066 }
1067
1068 static void dump_event(struct perf_evlist *evlist, union perf_event *event,
1069                        u64 file_offset, struct perf_sample *sample)
1070 {
1071         if (!dump_trace)
1072                 return;
1073
1074         printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1075                file_offset, event->header.size, event->header.type);
1076
1077         trace_event(event);
1078         if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1079                 evlist->trace_event_sample_raw(evlist, event, sample);
1080
1081         if (sample)
1082                 perf_evlist__print_tstamp(evlist, event, sample);
1083
1084         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1085                event->header.size, perf_event__name(event->header.type));
1086 }
1087
1088 static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
1089                         struct perf_sample *sample)
1090 {
1091         u64 sample_type;
1092
1093         if (!dump_trace)
1094                 return;
1095
1096         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1097                event->header.misc, sample->pid, sample->tid, sample->ip,
1098                sample->period, sample->addr);
1099
1100         sample_type = evsel->attr.sample_type;
1101
1102         if (evsel__has_callchain(evsel))
1103                 callchain__printf(evsel, sample);
1104
1105         if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
1106                 branch_stack__printf(sample);
1107
1108         if (sample_type & PERF_SAMPLE_REGS_USER)
1109                 regs_user__printf(sample);
1110
1111         if (sample_type & PERF_SAMPLE_REGS_INTR)
1112                 regs_intr__printf(sample);
1113
1114         if (sample_type & PERF_SAMPLE_STACK_USER)
1115                 stack_user__printf(&sample->user_stack);
1116
1117         if (sample_type & PERF_SAMPLE_WEIGHT)
1118                 printf("... weight: %" PRIu64 "\n", sample->weight);
1119
1120         if (sample_type & PERF_SAMPLE_DATA_SRC)
1121                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1122
1123         if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1124                 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1125
1126         if (sample_type & PERF_SAMPLE_TRANSACTION)
1127                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1128
1129         if (sample_type & PERF_SAMPLE_READ)
1130                 sample_read__printf(sample, evsel->attr.read_format);
1131 }
1132
1133 static void dump_read(struct perf_evsel *evsel, union perf_event *event)
1134 {
1135         struct read_event *read_event = &event->read;
1136         u64 read_format;
1137
1138         if (!dump_trace)
1139                 return;
1140
1141         printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
1142                evsel ? perf_evsel__name(evsel) : "FAIL",
1143                event->read.value);
1144
1145         read_format = evsel->attr.read_format;
1146
1147         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1148                 printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);
1149
1150         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1151                 printf("... time running : %" PRIu64 "\n", read_event->time_running);
1152
1153         if (read_format & PERF_FORMAT_ID)
1154                 printf("... id           : %" PRIu64 "\n", read_event->id);
1155 }
1156
1157 static struct machine *machines__find_for_cpumode(struct machines *machines,
1158                                                union perf_event *event,
1159                                                struct perf_sample *sample)
1160 {
1161         struct machine *machine;
1162
1163         if (perf_guest &&
1164             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1165              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1166                 u32 pid;
1167
1168                 if (event->header.type == PERF_RECORD_MMAP
1169                     || event->header.type == PERF_RECORD_MMAP2)
1170                         pid = event->mmap.pid;
1171                 else
1172                         pid = sample->pid;
1173
1174                 machine = machines__find(machines, pid);
1175                 if (!machine)
1176                         machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
1177                 return machine;
1178         }
1179
1180         return &machines->host;
1181 }
1182
1183 static int deliver_sample_value(struct perf_evlist *evlist,
1184                                 struct perf_tool *tool,
1185                                 union perf_event *event,
1186                                 struct perf_sample *sample,
1187                                 struct sample_read_value *v,
1188                                 struct machine *machine)
1189 {
1190         struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
1191
1192         if (sid) {
1193                 sample->id     = v->id;
1194                 sample->period = v->value - sid->period;
1195                 sid->period    = v->value;
1196         }
1197
1198         if (!sid || sid->evsel == NULL) {
1199                 ++evlist->stats.nr_unknown_id;
1200                 return 0;
1201         }
1202
1203         /*
1204          * There's no reason to deliver a sample
1205          * with a zero period, so bail out.
1206          */
1207         if (!sample->period)
1208                 return 0;
1209
1210         return tool->sample(tool, event, sample, sid->evsel, machine);
1211 }
1212
1213 static int deliver_sample_group(struct perf_evlist *evlist,
1214                                 struct perf_tool *tool,
1215                                 union  perf_event *event,
1216                                 struct perf_sample *sample,
1217                                 struct machine *machine)
1218 {
1219         int ret = -EINVAL;
1220         u64 i;
1221
1222         for (i = 0; i < sample->read.group.nr; i++) {
1223                 ret = deliver_sample_value(evlist, tool, event, sample,
1224                                            &sample->read.group.values[i],
1225                                            machine);
1226                 if (ret)
1227                         break;
1228         }
1229
1230         return ret;
1231 }
1232
1233 static int
1234  perf_evlist__deliver_sample(struct perf_evlist *evlist,
1235                              struct perf_tool *tool,
1236                              union  perf_event *event,
1237                              struct perf_sample *sample,
1238                              struct perf_evsel *evsel,
1239                              struct machine *machine)
1240 {
1241         /* We know evsel != NULL. */
1242         u64 sample_type = evsel->attr.sample_type;
1243         u64 read_format = evsel->attr.read_format;
1244
1245         /* Standard sample delivery. */
1246         if (!(sample_type & PERF_SAMPLE_READ))
1247                 return tool->sample(tool, event, sample, evsel, machine);
1248
1249         /* For PERF_SAMPLE_READ we have either single or group mode. */
1250         if (read_format & PERF_FORMAT_GROUP)
1251                 return deliver_sample_group(evlist, tool, event, sample,
1252                                             machine);
1253         else
1254                 return deliver_sample_value(evlist, tool, event, sample,
1255                                             &sample->read.one, machine);
1256 }
1257
1258 static int machines__deliver_event(struct machines *machines,
1259                                    struct perf_evlist *evlist,
1260                                    union perf_event *event,
1261                                    struct perf_sample *sample,
1262                                    struct perf_tool *tool, u64 file_offset)
1263 {
1264         struct perf_evsel *evsel;
1265         struct machine *machine;
1266
1267         dump_event(evlist, event, file_offset, sample);
1268
1269         evsel = perf_evlist__id2evsel(evlist, sample->id);
1270
1271         machine = machines__find_for_cpumode(machines, event, sample);
1272
1273         switch (event->header.type) {
1274         case PERF_RECORD_SAMPLE:
1275                 if (evsel == NULL) {
1276                         ++evlist->stats.nr_unknown_id;
1277                         return 0;
1278                 }
1279                 dump_sample(evsel, event, sample);
1280                 if (machine == NULL) {
1281                         ++evlist->stats.nr_unprocessable_samples;
1282                         return 0;
1283                 }
1284                 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1285         case PERF_RECORD_MMAP:
1286                 return tool->mmap(tool, event, sample, machine);
1287         case PERF_RECORD_MMAP2:
1288                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1289                         ++evlist->stats.nr_proc_map_timeout;
1290                 return tool->mmap2(tool, event, sample, machine);
1291         case PERF_RECORD_COMM:
1292                 return tool->comm(tool, event, sample, machine);
1293         case PERF_RECORD_NAMESPACES:
1294                 return tool->namespaces(tool, event, sample, machine);
1295         case PERF_RECORD_FORK:
1296                 return tool->fork(tool, event, sample, machine);
1297         case PERF_RECORD_EXIT:
1298                 return tool->exit(tool, event, sample, machine);
1299         case PERF_RECORD_LOST:
1300                 if (tool->lost == perf_event__process_lost)
1301                         evlist->stats.total_lost += event->lost.lost;
1302                 return tool->lost(tool, event, sample, machine);
1303         case PERF_RECORD_LOST_SAMPLES:
1304                 if (tool->lost_samples == perf_event__process_lost_samples)
1305                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1306                 return tool->lost_samples(tool, event, sample, machine);
1307         case PERF_RECORD_READ:
1308                 dump_read(evsel, event);
1309                 return tool->read(tool, event, sample, evsel, machine);
1310         case PERF_RECORD_THROTTLE:
1311                 return tool->throttle(tool, event, sample, machine);
1312         case PERF_RECORD_UNTHROTTLE:
1313                 return tool->unthrottle(tool, event, sample, machine);
1314         case PERF_RECORD_AUX:
1315                 if (tool->aux == perf_event__process_aux) {
1316                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1317                                 evlist->stats.total_aux_lost += 1;
1318                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1319                                 evlist->stats.total_aux_partial += 1;
1320                 }
1321                 return tool->aux(tool, event, sample, machine);
1322         case PERF_RECORD_ITRACE_START:
1323                 return tool->itrace_start(tool, event, sample, machine);
1324         case PERF_RECORD_SWITCH:
1325         case PERF_RECORD_SWITCH_CPU_WIDE:
1326                 return tool->context_switch(tool, event, sample, machine);
1327         case PERF_RECORD_KSYMBOL:
1328                 return tool->ksymbol(tool, event, sample, machine);
1329         case PERF_RECORD_BPF_EVENT:
1330                 return tool->bpf_event(tool, event, sample, machine);
1331         default:
1332                 ++evlist->stats.nr_unknown_events;
1333                 return -1;
1334         }
1335 }
1336
1337 static int perf_session__deliver_event(struct perf_session *session,
1338                                        union perf_event *event,
1339                                        struct perf_tool *tool,
1340                                        u64 file_offset)
1341 {
1342         struct perf_sample sample;
1343         int ret;
1344
1345         ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1346         if (ret) {
1347                 pr_err("Can't parse sample, err = %d\n", ret);
1348                 return ret;
1349         }
1350
1351         ret = auxtrace__process_event(session, event, &sample, tool);
1352         if (ret < 0)
1353                 return ret;
1354         if (ret > 0)
1355                 return 0;
1356
1357         return machines__deliver_event(&session->machines, session->evlist,
1358                                        event, &sample, tool, file_offset);
1359 }
1360
1361 static s64 perf_session__process_user_event(struct perf_session *session,
1362                                             union perf_event *event,
1363                                             u64 file_offset)
1364 {
1365         struct ordered_events *oe = &session->ordered_events;
1366         struct perf_tool *tool = session->tool;
1367         struct perf_sample sample = { .time = 0, };
1368         int fd = perf_data__fd(session->data);
1369         int err;
1370
1371         dump_event(session->evlist, event, file_offset, &sample);
1372
1373         /* These events are processed right away */
1374         switch (event->header.type) {
1375         case PERF_RECORD_HEADER_ATTR:
1376                 err = tool->attr(tool, event, &session->evlist);
1377                 if (err == 0) {
1378                         perf_session__set_id_hdr_size(session);
1379                         perf_session__set_comm_exec(session);
1380                 }
1381                 return err;
1382         case PERF_RECORD_EVENT_UPDATE:
1383                 return tool->event_update(tool, event, &session->evlist);
1384         case PERF_RECORD_HEADER_EVENT_TYPE:
1385                 /*
1386                  * Deprecated, but we need to handle it for the sake
1387                  * of old data files created in pipe mode.
1388                  */
1389                 return 0;
1390         case PERF_RECORD_HEADER_TRACING_DATA:
1391                 /* setup for reading amidst mmap */
1392                 lseek(fd, file_offset, SEEK_SET);
1393                 return tool->tracing_data(session, event);
1394         case PERF_RECORD_HEADER_BUILD_ID:
1395                 return tool->build_id(session, event);
1396         case PERF_RECORD_FINISHED_ROUND:
1397                 return tool->finished_round(tool, event, oe);
1398         case PERF_RECORD_ID_INDEX:
1399                 return tool->id_index(session, event);
1400         case PERF_RECORD_AUXTRACE_INFO:
1401                 return tool->auxtrace_info(session, event);
1402         case PERF_RECORD_AUXTRACE:
1403                 /* setup for reading amidst mmap */
1404                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1405                 return tool->auxtrace(session, event);
1406         case PERF_RECORD_AUXTRACE_ERROR:
1407                 perf_session__auxtrace_error_inc(session, event);
1408                 return tool->auxtrace_error(session, event);
1409         case PERF_RECORD_THREAD_MAP:
1410                 return tool->thread_map(session, event);
1411         case PERF_RECORD_CPU_MAP:
1412                 return tool->cpu_map(session, event);
1413         case PERF_RECORD_STAT_CONFIG:
1414                 return tool->stat_config(session, event);
1415         case PERF_RECORD_STAT:
1416                 return tool->stat(session, event);
1417         case PERF_RECORD_STAT_ROUND:
1418                 return tool->stat_round(session, event);
1419         case PERF_RECORD_TIME_CONV:
1420                 session->time_conv = event->time_conv;
1421                 return tool->time_conv(session, event);
1422         case PERF_RECORD_HEADER_FEATURE:
1423                 return tool->feature(session, event);
1424         default:
1425                 return -EINVAL;
1426         }
1427 }
1428
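/*
 * Deliver an event synthesized in-process: there is no file offset to pass
 * along, and the ordered-events queue is bypassed.
 */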
1429 int perf_session__deliver_synth_event(struct perf_session *session,
1430                                       union perf_event *event,
1431                                       struct perf_sample *sample)
1432 {
1433         struct perf_evlist *evlist = session->evlist;
1434         struct perf_tool *tool = session->tool;
1435
1436         events_stats__inc(&evlist->stats, event->header.type);
1437
1438         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1439                 return perf_session__process_user_event(session, event, 0);
1440
1441         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1442 }
1443
1444 static void event_swap(union perf_event *event, bool sample_id_all)
1445 {
1446         perf_event__swap_op swap;
1447
1448         swap = perf_event__swap_ops[event->header.type];
1449         if (swap)
1450                 swap(event, sample_id_all);
1451 }
1452
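/*
 * Read the event located at @file_offset without disturbing the normal
 * processing position: either point straight into the single mmap of the
 * file, or lseek()/readn() the event into @buf, byte-swapping header and
 * body when the file needs it, and optionally parse its sample.
 */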
1453 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1454                              void *buf, size_t buf_sz,
1455                              union perf_event **event_ptr,
1456                              struct perf_sample *sample)
1457 {
1458         union perf_event *event;
1459         size_t hdr_sz, rest;
1460         int fd;
1461
1462         if (session->one_mmap && !session->header.needs_swap) {
1463                 event = file_offset - session->one_mmap_offset +
1464                         session->one_mmap_addr;
1465                 goto out_parse_sample;
1466         }
1467
1468         if (perf_data__is_pipe(session->data))
1469                 return -1;
1470
1471         fd = perf_data__fd(session->data);
1472         hdr_sz = sizeof(struct perf_event_header);
1473
1474         if (buf_sz < hdr_sz)
1475                 return -1;
1476
1477         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1478             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1479                 return -1;
1480
1481         event = (union perf_event *)buf;
1482
1483         if (session->header.needs_swap)
1484                 perf_event_header__bswap(&event->header);
1485
1486         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1487                 return -1;
1488
1489         rest = event->header.size - hdr_sz;
1490
1491         if (readn(fd, buf, rest) != (ssize_t)rest)
1492                 return -1;
1493
1494         if (session->header.needs_swap)
1495                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1496
1497 out_parse_sample:
1498
1499         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1500             perf_evlist__parse_sample(session->evlist, event, sample))
1501                 return -1;
1502
1503         *event_ptr = event;
1504
1505         return 0;
1506 }
1507
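/*
 * Main per-event entry point: byte-swap if needed, account the event type,
 * dispatch user events immediately, and for kernel events either queue them
 * on their timestamp (when the tool asked for ordered delivery) or deliver
 * them directly; an event the queue refuses with -ETIME is also delivered
 * directly.
 */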
1508 static s64 perf_session__process_event(struct perf_session *session,
1509                                        union perf_event *event, u64 file_offset)
1510 {
1511         struct perf_evlist *evlist = session->evlist;
1512         struct perf_tool *tool = session->tool;
1513         int ret;
1514
1515         if (session->header.needs_swap)
1516                 event_swap(event, perf_evlist__sample_id_all(evlist));
1517
1518         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1519                 return -EINVAL;
1520
1521         events_stats__inc(&evlist->stats, event->header.type);
1522
1523         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1524                 return perf_session__process_user_event(session, event, file_offset);
1525
1526         if (tool->ordered_events) {
1527                 u64 timestamp = -1ULL;
1528
1529                 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1530                 if (ret && ret != -1)
1531                         return ret;
1532
1533                 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1534                 if (ret != -ETIME)
1535                         return ret;
1536         }
1537
1538         return perf_session__deliver_event(session, event, tool, file_offset);
1539 }
1540
1541 void perf_event_header__bswap(struct perf_event_header *hdr)
1542 {
1543         hdr->type = bswap_32(hdr->type);
1544         hdr->misc = bswap_16(hdr->misc);
1545         hdr->size = bswap_16(hdr->size);
1546 }
1547
1548 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1549 {
1550         return machine__findnew_thread(&session->machines.host, -1, pid);
1551 }
1552
1553 /*
1554  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1555  * So here a single thread is created for that. In reality there is a separate
1556  * idle task per cpu, so there should be one 'struct thread' per cpu, but there
1557  * is only one. That causes problems for some tools, requiring workarounds; see
1558  * for example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1559  */
1560 int perf_session__register_idle_thread(struct perf_session *session)
1561 {
1562         struct thread *thread;
1563         int err = 0;
1564
1565         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1566         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1567                 pr_err("problem inserting idle task.\n");
1568                 err = -1;
1569         }
1570
1571         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1572                 pr_err("problem inserting idle task.\n");
1573                 err = -1;
1574         }
1575
1576         /* machine__findnew_thread() got the thread, so put it */
1577         thread__put(thread);
1578         return err;
1579 }
1580
1581 static void
1582 perf_session__warn_order(const struct perf_session *session)
1583 {
1584         const struct ordered_events *oe = &session->ordered_events;
1585         struct perf_evsel *evsel;
1586         bool should_warn = true;
1587
1588         evlist__for_each_entry(session->evlist, evsel) {
1589                 if (evsel->attr.write_backward)
1590                         should_warn = false;
1591         }
1592
1593         if (!should_warn)
1594                 return;
1595         if (oe->nr_unordered_events != 0)
1596                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1597 }
1598
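/*
 * Summarize the bookkeeping gathered while processing: lost events and
 * samples, truncated or partial AUX data, unknown event types, invalid
 * callchains, out of order events and proc map timeouts.
 */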
1599 static void perf_session__warn_about_errors(const struct perf_session *session)
1600 {
1601         const struct events_stats *stats = &session->evlist->stats;
1602
1603         if (session->tool->lost == perf_event__process_lost &&
1604             stats->nr_events[PERF_RECORD_LOST] != 0) {
1605                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1606                             "Check IO/CPU overload!\n\n",
1607                             stats->nr_events[0],
1608                             stats->nr_events[PERF_RECORD_LOST]);
1609         }
1610
1611         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1612                 double drop_rate;
1613
1614                 drop_rate = (double)stats->total_lost_samples /
1615                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1616                 if (drop_rate > 0.05) {
1617                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1618                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1619                                     drop_rate * 100.0);
1620                 }
1621         }
1622
1623         if (session->tool->aux == perf_event__process_aux &&
1624             stats->total_aux_lost != 0) {
1625                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1626                             stats->total_aux_lost,
1627                             stats->nr_events[PERF_RECORD_AUX]);
1628         }
1629
1630         if (session->tool->aux == perf_event__process_aux &&
1631             stats->total_aux_partial != 0) {
1632                 bool vmm_exclusive = false;
1633
1634                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1635                                        &vmm_exclusive);
1636
1637                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1638                             "Are you running a KVM guest in the background?%s\n\n",
1639                             stats->total_aux_partial,
1640                             stats->nr_events[PERF_RECORD_AUX],
1641                             vmm_exclusive ?
1642                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1643                             "will reduce the gaps to only the guest's timeslices." :
1644                             "");
1645         }
1646
1647         if (stats->nr_unknown_events != 0) {
1648                 ui__warning("Found %u unknown events!\n\n"
1649                             "Is this an older tool processing a perf.data "
1650                             "file generated by a more recent tool?\n\n"
1651                             "If that is not the case, consider "
1652                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1653                             stats->nr_unknown_events);
1654         }
1655
1656         if (stats->nr_unknown_id != 0) {
1657                 ui__warning("%u samples with id not present in the header\n",
1658                             stats->nr_unknown_id);
1659         }
1660
1661         if (stats->nr_invalid_chains != 0) {
1662                 ui__warning("Found invalid callchains!\n\n"
1663                             "%u out of %u events were discarded for this reason.\n\n"
1664                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1665                             stats->nr_invalid_chains,
1666                             stats->nr_events[PERF_RECORD_SAMPLE]);
1667         }
1668
1669         if (stats->nr_unprocessable_samples != 0) {
1670                 ui__warning("%u unprocessable samples recorded.\n"
1671                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1672                             stats->nr_unprocessable_samples);
1673         }
1674
1675         perf_session__warn_order(session);
1676
1677         events_stats__auxtrace_error_warn(stats);
1678
1679         if (stats->nr_proc_map_timeout != 0) {
1680                 ui__warning("%d map information files for pre-existing threads were\n"
1681                             "not processed; if there are samples for those addresses,\n"
1682                             "they will not be resolved. You may find out which threads\n"
1683                             "these are by running with -v and redirecting the output\n"
1684                             "to a file.\n"
1685                             "The time limit to process the proc map may be too short;\n"
1686                             "increase it with --proc-map-timeout.\n",
1687                             stats->nr_proc_map_timeout);
1688         }
1689 }
1690
1691 static int perf_session__flush_thread_stack(struct thread *thread,
1692                                             void *p __maybe_unused)
1693 {
1694         return thread_stack__flush(thread);
1695 }
1696
1697 static int perf_session__flush_thread_stacks(struct perf_session *session)
1698 {
1699         return machines__for_each_thread(&session->machines,
1700                                          perf_session__flush_thread_stack,
1701                                          NULL);
1702 }
1703
1704 volatile int session_done;
1705
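/*
 * Pipe input cannot be mmapped, so read one event at a time into a buffer
 * that grows on demand, process it, and keep going until EOF or an error,
 * then run the same final flushes as the file-based path.
 */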
1706 static int __perf_session__process_pipe_events(struct perf_session *session)
1707 {
1708         struct ordered_events *oe = &session->ordered_events;
1709         struct perf_tool *tool = session->tool;
1710         int fd = perf_data__fd(session->data);
1711         union perf_event *event;
1712         uint32_t size, cur_size = 0;
1713         void *buf = NULL;
1714         s64 skip = 0;
1715         u64 head;
1716         ssize_t err;
1717         void *p;
1718
1719         perf_tool__fill_defaults(tool);
1720
1721         head = 0;
1722         cur_size = sizeof(union perf_event);
1723
1724         buf = malloc(cur_size);
1725         if (!buf)
1726                 return -errno;
1727         ordered_events__set_copy_on_queue(oe, true);
1728 more:
1729         event = buf;
1730         err = readn(fd, event, sizeof(struct perf_event_header));
1731         if (err <= 0) {
1732                 if (err == 0)
1733                         goto done;
1734
1735                 pr_err("failed to read event header\n");
1736                 goto out_err;
1737         }
1738
1739         if (session->header.needs_swap)
1740                 perf_event_header__bswap(&event->header);
1741
1742         size = event->header.size;
1743         if (size < sizeof(struct perf_event_header)) {
1744                 pr_err("bad event header size\n");
1745                 goto out_err;
1746         }
1747
1748         if (size > cur_size) {
1749                 void *new = realloc(buf, size);
1750                 if (!new) {
1751                         pr_err("failed to allocate memory to read event\n");
1752                         goto out_err;
1753                 }
1754                 buf = new;
1755                 cur_size = size;
1756                 event = buf;
1757         }
1758         p = event;
1759         p += sizeof(struct perf_event_header);
1760
1761         if (size - sizeof(struct perf_event_header)) {
1762                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1763                 if (err <= 0) {
1764                         if (err == 0) {
1765                                 pr_err("unexpected end of event stream\n");
1766                                 goto done;
1767                         }
1768
1769                         pr_err("failed to read event data\n");
1770                         goto out_err;
1771                 }
1772         }
1773
1774         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1775                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1776                        head, event->header.size, event->header.type);
1777                 err = -EINVAL;
1778                 goto out_err;
1779         }
1780
1781         head += size;
1782
1783         if (skip > 0)
1784                 head += skip;
1785
1786         if (!session_done())
1787                 goto more;
1788 done:
1789         /* do the final flush for ordered samples */
1790         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1791         if (err)
1792                 goto out_err;
1793         err = auxtrace__flush_events(session, tool);
1794         if (err)
1795                 goto out_err;
1796         err = perf_session__flush_thread_stacks(session);
1797 out_err:
1798         free(buf);
1799         if (!tool->no_warn)
1800                 perf_session__warn_about_errors(session);
1801         ordered_events__free(&session->ordered_events);
1802         auxtrace__free_events(session);
1803         return err;
1804 }
1805
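/*
 * Return the event at @head within the current mmap window, or NULL when the
 * event's header or body extends past the window, in which case the caller
 * remaps further into the file and retries.
 */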
1806 static union perf_event *
1807 fetch_mmaped_event(struct perf_session *session,
1808                    u64 head, size_t mmap_size, char *buf)
1809 {
1810         union perf_event *event;
1811
1812         /*
1813          * Ensure we have enough space remaining to read
1814          * the size of the event in the headers.
1815          */
1816         if (head + sizeof(event->header) > mmap_size)
1817                 return NULL;
1818
1819         event = (union perf_event *)(buf + head);
1820
1821         if (session->header.needs_swap)
1822                 perf_event_header__bswap(&event->header);
1823
1824         if (head + event->header.size > mmap_size) {
1825                 /* We're not fetching the event so swap back again */
1826                 if (session->header.needs_swap)
1827                         perf_event_header__bswap(&event->header);
1828                 return NULL;
1829         }
1830
1831         return event;
1832 }
1833
1834 /*
1835  * On 64bit we can mmap the data file in one go. No need for tiny mmap
1836  * slices. On 32bit we use 32MB.
1837  */
1838 #if BITS_PER_LONG == 64
1839 #define MMAP_SIZE ULLONG_MAX
1840 #define NUM_MMAPS 1
1841 #else
1842 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1843 #define NUM_MMAPS 128
1844 #endif
1845
1846 struct reader {
1847         int     fd;
1848         u64     data_size;
1849         u64     data_offset;
1850 };
1851
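/*
 * Walk the data section through a sliding mmap window: map a page-aligned
 * chunk, process events until the next one no longer fits, then unmap and
 * remap further into the file. On 64-bit the whole file fits in a single
 * mapping (see MMAP_SIZE above).
 */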
1852 static int
1853 reader__process_events(struct reader *rd, struct perf_session *session,
1854                        struct ui_progress *prog)
1855 {
1856         u64 data_size = rd->data_size;
1857         u64 head, page_offset, file_offset, file_pos, size;
1858         int err = 0, mmap_prot, mmap_flags, map_idx = 0;
1859         size_t  mmap_size;
1860         char *buf, *mmaps[NUM_MMAPS];
1861         union perf_event *event;
1862         s64 skip;
1863
1864         page_offset = page_size * (rd->data_offset / page_size);
1865         file_offset = page_offset;
1866         head = rd->data_offset - page_offset;
1867
1868         ui_progress__init_size(prog, data_size, "Processing events...");
1869
1870         data_size += rd->data_offset;
1871
1872         mmap_size = MMAP_SIZE;
1873         if (mmap_size > data_size) {
1874                 mmap_size = data_size;
1875                 session->one_mmap = true;
1876         }
1877
1878         memset(mmaps, 0, sizeof(mmaps));
1879
1880         mmap_prot  = PROT_READ;
1881         mmap_flags = MAP_SHARED;
1882
1883         if (session->header.needs_swap) {
1884                 mmap_prot  |= PROT_WRITE;
1885                 mmap_flags = MAP_PRIVATE;
1886         }
1887 remap:
1888         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
1889                    file_offset);
1890         if (buf == MAP_FAILED) {
1891                 pr_err("failed to mmap file\n");
1892                 err = -errno;
1893                 goto out;
1894         }
1895         mmaps[map_idx] = buf;
1896         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1897         file_pos = file_offset + head;
1898         if (session->one_mmap) {
1899                 session->one_mmap_addr = buf;
1900                 session->one_mmap_offset = file_offset;
1901         }
1902
1903 more:
1904         event = fetch_mmaped_event(session, head, mmap_size, buf);
1905         if (!event) {
1906                 if (mmaps[map_idx]) {
1907                         munmap(mmaps[map_idx], mmap_size);
1908                         mmaps[map_idx] = NULL;
1909                 }
1910
1911                 page_offset = page_size * (head / page_size);
1912                 file_offset += page_offset;
1913                 head -= page_offset;
1914                 goto remap;
1915         }
1916
1917         size = event->header.size;
1918
1919         if (size < sizeof(struct perf_event_header) ||
1920             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1921                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1922                        file_offset + head, event->header.size,
1923                        event->header.type);
1924                 err = -EINVAL;
1925                 goto out;
1926         }
1927
1928         if (skip)
1929                 size += skip;
1930
1931         head += size;
1932         file_pos += size;
1933
1934         ui_progress__update(prog, size);
1935
1936         if (session_done())
1937                 goto out;
1938
1939         if (file_pos < data_size)
1940                 goto more;
1941
1942 out:
1943         return err;
1944 }
1945
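/*
 * Process a perf.data file: run the reader over the data section, then do
 * the final flush of ordered events, AUX trace data and per-thread stacks.
 */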
1946 static int __perf_session__process_events(struct perf_session *session)
1947 {
1948         struct reader rd = {
1949                 .fd             = perf_data__fd(session->data),
1950                 .data_size      = session->header.data_size,
1951                 .data_offset    = session->header.data_offset,
1952         };
1953         struct ordered_events *oe = &session->ordered_events;
1954         struct perf_tool *tool = session->tool;
1955         struct ui_progress prog;
1956         int err;
1957
1958         perf_tool__fill_defaults(tool);
1959
1960         if (rd.data_size == 0)
1961                 return -1;
1962
1963         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
1964
1965         err = reader__process_events(&rd, session, &prog);
1966         if (err)
1967                 goto out_err;
1968         /* do the final flush for ordered samples */
1969         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1970         if (err)
1971                 goto out_err;
1972         err = auxtrace__flush_events(session, tool);
1973         if (err)
1974                 goto out_err;
1975         err = perf_session__flush_thread_stacks(session);
1976 out_err:
1977         ui_progress__finish();
1978         if (!tool->no_warn)
1979                 perf_session__warn_about_errors(session);
1980         /*
1981          * We may be switching perf.data output; make ordered_events
1982          * reusable.
1983          */
1984         ordered_events__reinit(&session->ordered_events);
1985         auxtrace__free_events(session);
1986         session->one_mmap = false;
1987         return err;
1988 }
1989
1990 int perf_session__process_events(struct perf_session *session)
1991 {
1992         if (perf_session__register_idle_thread(session) < 0)
1993                 return -ENOMEM;
1994
1995         if (perf_data__is_pipe(session->data))
1996                 return __perf_session__process_pipe_events(session);
1997
1998         return __perf_session__process_events(session);
1999 }
2000
2001 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2002 {
2003         struct perf_evsel *evsel;
2004
2005         evlist__for_each_entry(session->evlist, evsel) {
2006                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
2007                         return true;
2008         }
2009
2010         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2011         return false;
2012 }
2013
2014 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2015 {
2016         char *bracket;
2017         struct ref_reloc_sym *ref;
2018         struct kmap *kmap;
2019
2020         ref = zalloc(sizeof(struct ref_reloc_sym));
2021         if (ref == NULL)
2022                 return -ENOMEM;
2023
2024         ref->name = strdup(symbol_name);
2025         if (ref->name == NULL) {
2026                 free(ref);
2027                 return -ENOMEM;
2028         }
2029
2030         bracket = strchr(ref->name, ']');
2031         if (bracket)
2032                 *bracket = '\0';
2033
2034         ref->addr = addr;
2035
2036         kmap = map__kmap(map);
2037         if (kmap)
2038                 kmap->ref_reloc_sym = ref;
2039
2040         return 0;
2041 }
2042
2043 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2044 {
2045         return machines__fprintf_dsos(&session->machines, fp);
2046 }
2047
2048 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2049                                           bool (skip)(struct dso *dso, int parm), int parm)
2050 {
2051         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2052 }
2053
2054 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2055 {
2056         size_t ret;
2057         const char *msg = "";
2058
2059         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2060                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2061
2062         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2063
2064         ret += events_stats__fprintf(&session->evlist->stats, fp);
2065         return ret;
2066 }
2067
2068 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2069 {
2070         /*
2071          * FIXME: Here we have to actually print all the machines in this
2072          * session, not just the host...
2073          */
2074         return machine__fprintf(&session->machines.host, fp);
2075 }
2076
2077 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2078                                               unsigned int type)
2079 {
2080         struct perf_evsel *pos;
2081
2082         evlist__for_each_entry(session->evlist, pos) {
2083                 if (pos->attr.type == type)
2084                         return pos;
2085         }
2086         return NULL;
2087 }
2088
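/*
 * Check that the recorded events carry PERF_SAMPLE_CPU and set a bit in
 * @cpu_bitmap for each CPU named in @cpu_list.
 *
 * Minimal sketch of a caller (hypothetical -C handling; the bitmap storage
 * shown here is an assumption, not something defined in this file):
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (cpu_list && perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap) < 0)
 *		return -1;
 */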
2089 int perf_session__cpu_bitmap(struct perf_session *session,
2090                              const char *cpu_list, unsigned long *cpu_bitmap)
2091 {
2092         int i, err = -1;
2093         struct cpu_map *map;
2094
2095         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2096                 struct perf_evsel *evsel;
2097
2098                 evsel = perf_session__find_first_evtype(session, i);
2099                 if (!evsel)
2100                         continue;
2101
2102                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2103                         pr_err("File does not contain CPU events. "
2104                                "Remove -C option to proceed.\n");
2105                         return -1;
2106                 }
2107         }
2108
2109         map = cpu_map__new(cpu_list);
2110         if (map == NULL) {
2111                 pr_err("Invalid cpu_list\n");
2112                 return -1;
2113         }
2114
2115         for (i = 0; i < map->nr; i++) {
2116                 int cpu = map->map[i];
2117
2118                 if (cpu >= MAX_NR_CPUS) {
2119                         pr_err("Requested CPU %d too large. "
2120                                "Consider raising MAX_NR_CPUS\n", cpu);
2121                         goto out_delete_map;
2122                 }
2123
2124                 set_bit(cpu, cpu_bitmap);
2125         }
2126
2127         err = 0;
2128
2129 out_delete_map:
2130         cpu_map__put(map);
2131         return err;
2132 }
2133
2134 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2135                                 bool full)
2136 {
2137         if (session == NULL || fp == NULL)
2138                 return;
2139
2140         fprintf(fp, "# ========\n");
2141         perf_header__fprintf_info(session, fp, full);
2142         fprintf(fp, "# ========\n#\n");
2143 }
2144
2145
2146 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2147                                              const struct perf_evsel_str_handler *assocs,
2148                                              size_t nr_assocs)
2149 {
2150         struct perf_evsel *evsel;
2151         size_t i;
2152         int err;
2153
2154         for (i = 0; i < nr_assocs; i++) {
2155                 /*
2156                  * If we are adding a handler for an event not in this
2157                  * session, just ignore it.
2158                  */
2159                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2160                 if (evsel == NULL)
2161                         continue;
2162
2163                 err = -EEXIST;
2164                 if (evsel->handler != NULL)
2165                         goto out;
2166                 evsel->handler = assocs[i].handler;
2167         }
2168
2169         err = 0;
2170 out:
2171         return err;
2172 }
2173
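/*
 * Apply a PERF_RECORD_ID_INDEX event: for each entry look up the matching
 * perf_sample_id and record its idx, cpu and tid.
 */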
2174 int perf_event__process_id_index(struct perf_session *session,
2175                                  union perf_event *event)
2176 {
2177         struct perf_evlist *evlist = session->evlist;
2178         struct id_index_event *ie = &event->id_index;
2179         size_t i, nr, max_nr;
2180
2181         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2182                  sizeof(struct id_index_entry);
2183         nr = ie->nr;
2184         if (nr > max_nr)
2185                 return -EINVAL;
2186
2187         if (dump_trace)
2188                 fprintf(stdout, " nr: %zu\n", nr);
2189
2190         for (i = 0; i < nr; i++) {
2191                 struct id_index_entry *e = &ie->entries[i];
2192                 struct perf_sample_id *sid;
2193
2194                 if (dump_trace) {
2195                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2196                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2197                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2198                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2199                 }
2200
2201                 sid = perf_evlist__id2sid(evlist, e->id);
2202                 if (!sid)
2203                         return -ENOENT;
2204                 sid->idx = e->idx;
2205                 sid->cpu = e->cpu;
2206                 sid->tid = e->tid;
2207         }
2208         return 0;
2209 }
2210
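/*
 * Synthesize PERF_RECORD_ID_INDEX events covering every id of every evsel.
 * The entry array is bounded by the 16-bit header size, so the event is
 * emitted and refilled whenever that limit is reached.
 */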
2211 int perf_event__synthesize_id_index(struct perf_tool *tool,
2212                                     perf_event__handler_t process,
2213                                     struct perf_evlist *evlist,
2214                                     struct machine *machine)
2215 {
2216         union perf_event *ev;
2217         struct perf_evsel *evsel;
2218         size_t nr = 0, i = 0, sz, max_nr, n;
2219         int err;
2220
2221         pr_debug2("Synthesizing id index\n");
2222
2223         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2224                  sizeof(struct id_index_entry);
2225
2226         evlist__for_each_entry(evlist, evsel)
2227                 nr += evsel->ids;
2228
2229         n = nr > max_nr ? max_nr : nr;
2230         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2231         ev = zalloc(sz);
2232         if (!ev)
2233                 return -ENOMEM;
2234
2235         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2236         ev->id_index.header.size = sz;
2237         ev->id_index.nr = n;
2238
2239         evlist__for_each_entry(evlist, evsel) {
2240                 u32 j;
2241
2242                 for (j = 0; j < evsel->ids; j++) {
2243                         struct id_index_entry *e;
2244                         struct perf_sample_id *sid;
2245
2246                         if (i >= n) {
2247                                 err = process(tool, ev, NULL, machine);
2248                                 if (err)
2249                                         goto out_err;
2250                                 nr -= n;
2251                                 i = 0;
2252                         }
2253
2254                         e = &ev->id_index.entries[i++];
2255
2256                         e->id = evsel->id[j];
2257
2258                         sid = perf_evlist__id2sid(evlist, e->id);
2259                         if (!sid) {
2260                                 free(ev);
2261                                 return -ENOENT;
2262                         }
2263
2264                         e->idx = sid->idx;
2265                         e->cpu = sid->cpu;
2266                         e->tid = sid->tid;
2267                 }
2268         }
2269
2270         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2271         ev->id_index.header.size = sz;
2272         ev->id_index.nr = nr;
2273
2274         err = process(tool, ev, NULL, machine);
2275 out_err:
2276         free(ev);
2277
2278         return err;
2279 }