// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

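/*
 * Read and validate the file header. For pipe data and files that only
 * carry HEADER_STAT data there is nothing more to check; otherwise make
 * sure all evsels agree on sample_type, sample_id_all and read_format,
 * since the parsing code relies on them being consistent.
 */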
static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

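/*
 * Callback used by the ordered_events queue: recover the owning session
 * via container_of() and deliver the dequeued event through the tool.
 */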
static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}

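/*
 * Allocate and set up a session: open the perf.data file when one is
 * given (reading the header and, for directory data, the per-cpu files),
 * create kernel maps in write mode, and fall back to unordered
 * processing when the data lacks sample_id_all.
 */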
struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        if (data) {
                if (perf_data__open(data))
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        if (perf_session__open(session) < 0)
                                goto out_delete;

                        /*
                         * Set session attributes that are present in perf.data
                         * but not in pipe mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        perf_evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir && perf_data__open_dir(data))
                                goto out_delete;
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so perf_evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data)
                perf_data__close(session->data);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

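/* Consume and discard n bytes from fd; used where seeking is impossible. */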
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

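/*
 * Plug every callback the tool did not set with either a sensible
 * default (lost, aux, context_switch, ...) or a "dump and ignore" stub,
 * so event dispatch never has to NULL-check a handler.
 */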
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf_event == NULL)
                tool->bpf_event = perf_event__process_bpf_event;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

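/*
 * Reverse the bit order of a byte with three swap steps: nibbles, then
 * bit pairs, then adjacent bits. E.g. revbyte(0xb1) == 0x8d
 * (1011 0001 -> 1000 1101).
 */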
static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

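/*
 * Only swap a field if it lies entirely within the attr size that was
 * actually written, so attrs from older perfs with a smaller
 * perf_event_attr are handled safely.
 */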
#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);

        /*
         * The fields after read_format are bitfields. Check read_format
         * because we are unable to use offsetof on a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field_16
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct cpu_map_mask *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct cpu_map_mask *) data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
                break;
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

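/*
 * Byte-swap handlers indexed by record type, used when the data file
 * was written on a host of the opposite endianness. A NULL entry means
 * there is nothing (known) to swap for that type.
 */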
static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over all buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

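/*
 * Print the kernel part of the FP call chain followed by the user call
 * chain reconstructed from the LBR call-stack entries.
 */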
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR callstack can only get the user call chain:
                 * i is the number of kernel call chain entries,
                 * 1 is PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are register pairs: the caller is stored
                 * in the "from" register, while the callee is stored
                 * in the "to" register.
                 * For example, for a call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to construct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
        }
}

static void callchain__printf(struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (perf_evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &sample->branch_stack->entries[i];

                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                        i, e->from, e->to,
                        (unsigned short)e->flags.cycles,
                        e->flags.mispred ? "M" : " ",
                        e->flags.predicted ? "P" : " ",
                        e->flags.abort ? "A" : " ",
                        e->flags.in_tx ? "T" : " ",
                        (unsigned)e->flags.reserved);
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

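/*
 * Print the "cpu time" prefix for dumped events, or "-1 -1" when the
 * record carries no sample information to take them from.
 */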
static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

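/* Dump the raw event header; only active when dump_trace (-D) is set. */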
static void dump_event(struct perf_evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
        struct read_event *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
               evsel ? perf_evsel__name(evsel) : "FAIL",
               event->read.value);

        /* Don't dereference a NULL evsel; the printf above already said "FAIL". */
        if (!evsel)
                return;

        read_format = evsel->attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRIu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRIu64 "\n", read_event->id);
}

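/*
 * Pick the machine a sample belongs to: a guest machine keyed by pid for
 * guest samples (falling back to the default guest), the host otherwise.
 */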
static struct machine *machines__find_for_cpumode(struct machines *machines,
                                               union perf_event *event,
                                               struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

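/*
 * Deliver one PERF_SAMPLE_READ value: the sample period is the delta
 * between this counter value and the one remembered from the previous
 * sample with the same id.
 */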
static int deliver_sample_value(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        /*
         * There's no reason to deliver a sample
         * with zero period, bail out.
         */
        if (!sample->period)
                return 0;

        return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
                            struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel,
                            struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}

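/*
 * Central dispatch: route a fully parsed event to the matching tool
 * callback, updating the evlist statistics (lost events, truncated or
 * partial AUX data, unknown ids) along the way.
 */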
static int machines__deliver_event(struct machines *machines,
                                   struct perf_evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(evlist, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(evlist, sample->id);

        machine = machines__find_for_cpumode(machines, event, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
                dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
                }
                return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
                        ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_NAMESPACES:
                return tool->namespaces(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_LOST_SAMPLES:
                if (tool->lost_samples == perf_event__process_lost_samples)
                        evlist->stats.total_lost_samples += event->lost_samples.lost;
                return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                dump_read(evsel, event);
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
                if (tool->aux == perf_event__process_aux) {
                        if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
                                evlist->stats.total_aux_lost += 1;
                        if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
                                evlist->stats.total_aux_partial += 1;
                }
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                return tool->context_switch(tool, event, sample, machine);
        case PERF_RECORD_KSYMBOL:
                return tool->ksymbol(tool, event, sample, machine);
        case PERF_RECORD_BPF_EVENT:
                return tool->bpf_event(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
}

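/*
 * Parse the sample once, let the auxtrace layer have first go at the
 * event (a positive return means it consumed it), then hand it to the
 * generic dispatcher.
 */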
static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret;

        ret = perf_evlist__parse_sample(session->evlist, event, &sample);
        if (ret) {
                pr_err("Can't parse sample, err = %d\n", ret);
                return ret;
        }

        ret = auxtrace__process_event(session, event, &sample, tool);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return 0;

        return machines__deliver_event(&session->machines, session->evlist,
                                       event, &sample, tool, file_offset);
}

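/*
 * User-space records, synthesized by perf itself rather than the kernel,
 * are handled immediately and never go through the ordered_events queue.
 */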
static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        struct perf_sample sample = { .time = 0, };
        int fd = perf_data__fd(session->data);
        int err;

        dump_event(session->evlist, event, file_offset, &sample);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(tool, event, &session->evlist);
                if (err == 0) {
                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
                return err;
        case PERF_RECORD_EVENT_UPDATE:
                return tool->event_update(tool, event, &session->evlist);
        case PERF_RECORD_HEADER_EVENT_TYPE:
1389                 /*
1390                  * Deprecated, but we need to handle it for the sake
1391                  * of old data files created in pipe mode.
1392                  */
1393                 return 0;
1394         case PERF_RECORD_HEADER_TRACING_DATA:
1395                 /* setup for reading amidst mmap */
1396                 lseek(fd, file_offset, SEEK_SET);
1397                 return tool->tracing_data(session, event);
1398         case PERF_RECORD_HEADER_BUILD_ID:
1399                 return tool->build_id(session, event);
1400         case PERF_RECORD_FINISHED_ROUND:
1401                 return tool->finished_round(tool, event, oe);
1402         case PERF_RECORD_ID_INDEX:
1403                 return tool->id_index(session, event);
1404         case PERF_RECORD_AUXTRACE_INFO:
1405                 return tool->auxtrace_info(session, event);
1406         case PERF_RECORD_AUXTRACE:
1407                 /* setup for reading amidst mmap */
1408                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1409                 return tool->auxtrace(session, event);
1410         case PERF_RECORD_AUXTRACE_ERROR:
1411                 perf_session__auxtrace_error_inc(session, event);
1412                 return tool->auxtrace_error(session, event);
1413         case PERF_RECORD_THREAD_MAP:
1414                 return tool->thread_map(session, event);
1415         case PERF_RECORD_CPU_MAP:
1416                 return tool->cpu_map(session, event);
1417         case PERF_RECORD_STAT_CONFIG:
1418                 return tool->stat_config(session, event);
1419         case PERF_RECORD_STAT:
1420                 return tool->stat(session, event);
1421         case PERF_RECORD_STAT_ROUND:
1422                 return tool->stat_round(session, event);
1423         case PERF_RECORD_TIME_CONV:
1424                 session->time_conv = event->time_conv;
1425                 return tool->time_conv(session, event);
1426         case PERF_RECORD_HEADER_FEATURE:
1427                 return tool->feature(session, event);
1428         default:
1429                 return -EINVAL;
1430         }
1431 }
1432
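     /*
      * Entry point for records synthesized by tools at processing time:
      * account the record in the stats and deliver it straight away, with
      * a file offset of zero since it never existed in the perf.data file.
      */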
1433 int perf_session__deliver_synth_event(struct perf_session *session,
1434                                       union perf_event *event,
1435                                       struct perf_sample *sample)
1436 {
1437         struct perf_evlist *evlist = session->evlist;
1438         struct perf_tool *tool = session->tool;
1439
1440         events_stats__inc(&evlist->stats, event->header.type);
1441
1442         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1443                 return perf_session__process_user_event(session, event, 0);
1444
1445         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1446 }
1447
1448 static void event_swap(union perf_event *event, bool sample_id_all)
1449 {
1450         perf_event__swap_op swap;
1451
1452         swap = perf_event__swap_ops[event->header.type];
1453         if (swap)
1454                 swap(event, sample_id_all);
1455 }
1456
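     /*
      * Read a single event at an arbitrary file offset without disturbing
      * the normal processing stream.  If the whole file is mapped in one
      * go and needs no byte swapping, the event is referenced directly in
      * the mapping; otherwise it is read through 'buf', which must be big
      * enough for the whole event.  Pipe input is rejected because it
      * cannot seek.
      */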
1457 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1458                              void *buf, size_t buf_sz,
1459                              union perf_event **event_ptr,
1460                              struct perf_sample *sample)
1461 {
1462         union perf_event *event;
1463         size_t hdr_sz, rest;
1464         int fd;
1465
1466         if (session->one_mmap && !session->header.needs_swap) {
1467                 event = file_offset - session->one_mmap_offset +
1468                         session->one_mmap_addr;
1469                 goto out_parse_sample;
1470         }
1471
1472         if (perf_data__is_pipe(session->data))
1473                 return -1;
1474
1475         fd = perf_data__fd(session->data);
1476         hdr_sz = sizeof(struct perf_event_header);
1477
1478         if (buf_sz < hdr_sz)
1479                 return -1;
1480
1481         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1482             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1483                 return -1;
1484
1485         event = (union perf_event *)buf;
1486
1487         if (session->header.needs_swap)
1488                 perf_event_header__bswap(&event->header);
1489
1490         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1491                 return -1;
1492
             /* Advance past the header so the body does not overwrite it. */
             buf += hdr_sz;
1493         rest = event->header.size - hdr_sz;
1494
1495         if (readn(fd, buf, rest) != (ssize_t)rest)
1496                 return -1;
1497
1498         if (session->header.needs_swap)
1499                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1500
1501 out_parse_sample:
1502
1503         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1504             perf_evlist__parse_sample(session->evlist, event, sample))
1505                 return -1;
1506
1507         *event_ptr = event;
1508
1509         return 0;
1510 }
1511
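     /*
      * Top-level dispatch for one event: byte-swap if the file endianness
      * differs from the host, bump the statistics, process user records
      * immediately, and queue everything else for time-ordered delivery,
      * falling back to direct delivery when ordering is off or the queue
      * rejects the timestamp with -ETIME.
      */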
1512 static s64 perf_session__process_event(struct perf_session *session,
1513                                        union perf_event *event, u64 file_offset)
1514 {
1515         struct perf_evlist *evlist = session->evlist;
1516         struct perf_tool *tool = session->tool;
1517         int ret;
1518
1519         if (session->header.needs_swap)
1520                 event_swap(event, perf_evlist__sample_id_all(evlist));
1521
1522         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1523                 return -EINVAL;
1524
1525         events_stats__inc(&evlist->stats, event->header.type);
1526
1527         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1528                 return perf_session__process_user_event(session, event, file_offset);
1529
1530         if (tool->ordered_events) {
1531                 u64 timestamp = -1ULL;
1532
1533                 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1534                 if (ret && ret != -1)
1535                         return ret;
1536
1537                 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1538                 if (ret != -ETIME)
1539                         return ret;
1540         }
1541
1542         return perf_session__deliver_event(session, event, tool, file_offset);
1543 }
1544
1545 void perf_event_header__bswap(struct perf_event_header *hdr)
1546 {
1547         hdr->type = bswap_32(hdr->type);
1548         hdr->misc = bswap_16(hdr->misc);
1549         hdr->size = bswap_16(hdr->size);
1550 }
1551
1552 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1553 {
1554         return machine__findnew_thread(&session->machines.host, -1, pid);
1555 }
1556
1557 /*
1558  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1559  * So a single thread is created here to represent it. In reality there is a
1560  * separate idle task per cpu, so there should be one 'struct thread' per cpu,
1561  * but there is only one, which causes problems for some tools, requiring
1562  * workarounds, e.g. get_idle_thread() in builtin-sched.c and thread_stack__per_cpu().
1563  */
1564 int perf_session__register_idle_thread(struct perf_session *session)
1565 {
1566         struct thread *thread;
1567         int err = 0;
1568
1569         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1570         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1571                 pr_err("problem inserting idle task.\n");
1572                 err = -1;
1573         }
1574
1575         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1576                 pr_err("problem inserting idle task.\n");
1577                 err = -1;
1578         }
1579
1580         /* machine__findnew_thread() got the thread, so put it */
1581         thread__put(thread);
1582         return err;
1583 }
1584
1585 static void
1586 perf_session__warn_order(const struct perf_session *session)
1587 {
1588         const struct ordered_events *oe = &session->ordered_events;
1589         struct perf_evsel *evsel;
1590         bool should_warn = true;
1591
1592         evlist__for_each_entry(session->evlist, evsel) {
1593                 if (evsel->attr.write_backward)
1594                         should_warn = false;
1595         }
1596
1597         if (!should_warn)
1598                 return;
1599         if (oe->nr_unordered_events != 0)
1600                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1601 }
1602
1603 static void perf_session__warn_about_errors(const struct perf_session *session)
1604 {
1605         const struct events_stats *stats = &session->evlist->stats;
1606
1607         if (session->tool->lost == perf_event__process_lost &&
1608             stats->nr_events[PERF_RECORD_LOST] != 0) {
1609                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1610                             "Check IO/CPU overload!\n\n",
1611                             stats->nr_events[0],
1612                             stats->nr_events[PERF_RECORD_LOST]);
1613         }
1614
1615         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1616                 double drop_rate;
1617
1618                 drop_rate = (double)stats->total_lost_samples /
1619                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1620                 if (drop_rate > 0.05) {
1621                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1622                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1623                                     drop_rate * 100.0);
1624                 }
1625         }
1626
1627         if (session->tool->aux == perf_event__process_aux &&
1628             stats->total_aux_lost != 0) {
1629                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1630                             stats->total_aux_lost,
1631                             stats->nr_events[PERF_RECORD_AUX]);
1632         }
1633
1634         if (session->tool->aux == perf_event__process_aux &&
1635             stats->total_aux_partial != 0) {
1636                 bool vmm_exclusive = false;
1637
1638                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1639                                        &vmm_exclusive);
1640
1641                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1642                             "Are you running a KVM guest in the background?%s\n\n",
1643                             stats->total_aux_partial,
1644                             stats->nr_events[PERF_RECORD_AUX],
1645                             vmm_exclusive ?
1646                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1647                             "will reduce the gaps to only the guest's timeslices." :
1648                             "");
1649         }
1650
1651         if (stats->nr_unknown_events != 0) {
1652                 ui__warning("Found %u unknown events!\n\n"
1653                             "Is this an older tool processing a perf.data "
1654                             "file generated by a more recent tool?\n\n"
1655                             "If that is not the case, consider "
1656                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1657                             stats->nr_unknown_events);
1658         }
1659
1660         if (stats->nr_unknown_id != 0) {
1661                 ui__warning("%u samples with id not present in the header\n",
1662                             stats->nr_unknown_id);
1663         }
1664
1665         if (stats->nr_invalid_chains != 0) {
1666                 ui__warning("Found invalid callchains!\n\n"
1667                             "%u out of %u events were discarded for this reason.\n\n"
1668                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1669                             stats->nr_invalid_chains,
1670                             stats->nr_events[PERF_RECORD_SAMPLE]);
1671         }
1672
1673         if (stats->nr_unprocessable_samples != 0) {
1674                 ui__warning("%u unprocessable samples recorded.\n"
1675                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1676                             stats->nr_unprocessable_samples);
1677         }
1678
1679         perf_session__warn_order(session);
1680
1681         events_stats__auxtrace_error_warn(stats);
1682
1683         if (stats->nr_proc_map_timeout != 0) {
1684                 ui__warning("%d map information files for pre-existing threads were\n"
1685                             "not processed; if there are samples for those addresses they\n"
1686                             "will not be resolved. You may find out which threads these\n"
1687                             "are by running with -v and redirecting the output\n"
1688                             "to a file.\n"
1689                             "Was the time limit to process the proc maps too short?\n"
1690                             "Increase it with --proc-map-timeout.\n",
1691                             stats->nr_proc_map_timeout);
1692         }
1693 }
1694
1695 static int perf_session__flush_thread_stack(struct thread *thread,
1696                                             void *p __maybe_unused)
1697 {
1698         return thread_stack__flush(thread);
1699 }
1700
1701 static int perf_session__flush_thread_stacks(struct perf_session *session)
1702 {
1703         return machines__for_each_thread(&session->machines,
1704                                          perf_session__flush_thread_stack,
1705                                          NULL);
1706 }
1707
1708 volatile int session_done;
1709
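     /*
      * A pipe can be neither mmapped nor lseek'd, so events are read one
      * at a time into a heap buffer that is grown whenever a record turns
      * out to be larger than anything seen so far.  Queued events must be
      * copied (copy on queue) because that single buffer is reused for
      * every read.
      */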
1710 static int __perf_session__process_pipe_events(struct perf_session *session)
1711 {
1712         struct ordered_events *oe = &session->ordered_events;
1713         struct perf_tool *tool = session->tool;
1714         int fd = perf_data__fd(session->data);
1715         union perf_event *event;
1716         uint32_t size, cur_size = 0;
1717         void *buf = NULL;
1718         s64 skip = 0;
1719         u64 head;
1720         ssize_t err;
1721         void *p;
1722
1723         perf_tool__fill_defaults(tool);
1724
1725         head = 0;
1726         cur_size = sizeof(union perf_event);
1727
1728         buf = malloc(cur_size);
1729         if (!buf)
1730                 return -errno;
1731         ordered_events__set_copy_on_queue(oe, true);
1732 more:
1733         event = buf;
1734         err = readn(fd, event, sizeof(struct perf_event_header));
1735         if (err <= 0) {
1736                 if (err == 0)
1737                         goto done;
1738
1739                 pr_err("failed to read event header\n");
1740                 goto out_err;
1741         }
1742
1743         if (session->header.needs_swap)
1744                 perf_event_header__bswap(&event->header);
1745
1746         size = event->header.size;
1747         if (size < sizeof(struct perf_event_header)) {
1748                 pr_err("bad event header size\n");
1749                 goto out_err;
1750         }
1751
1752         if (size > cur_size) {
1753                 void *new = realloc(buf, size);
1754                 if (!new) {
1755                         pr_err("failed to allocate memory to read event\n");
1756                         goto out_err;
1757                 }
1758                 buf = new;
1759                 cur_size = size;
1760                 event = buf;
1761         }
1762         p = event;
1763         p += sizeof(struct perf_event_header);
1764
1765         if (size - sizeof(struct perf_event_header)) {
1766                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1767                 if (err <= 0) {
1768                         if (err == 0) {
1769                                 pr_err("unexpected end of event stream\n");
1770                                 goto done;
1771                         }
1772
1773                         pr_err("failed to read event data\n");
1774                         goto out_err;
1775                 }
1776         }
1777
1778         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1779                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1780                        head, event->header.size, event->header.type);
1781                 err = -EINVAL;
1782                 goto out_err;
1783         }
1784
1785         head += size;
1786
1787         if (skip > 0)
1788                 head += skip;
1789
1790         if (!session_done())
1791                 goto more;
1792 done:
1793         /* do the final flush for ordered samples */
1794         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1795         if (err)
1796                 goto out_err;
1797         err = auxtrace__flush_events(session, tool);
1798         if (err)
1799                 goto out_err;
1800         err = perf_session__flush_thread_stacks(session);
1801 out_err:
1802         free(buf);
1803         if (!tool->no_warn)
1804                 perf_session__warn_about_errors(session);
1805         ordered_events__free(&session->ordered_events);
1806         auxtrace__free_events(session);
1807         return err;
1808 }
1809
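     /*
      * Return the event at 'head' if it lies entirely within the current
      * mmap window, or NULL if it straddles the end of the window, in which
      * case the caller remaps starting at the page containing 'head'.
      */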
1810 static union perf_event *
1811 fetch_mmaped_event(struct perf_session *session,
1812                    u64 head, size_t mmap_size, char *buf)
1813 {
1814         union perf_event *event;
1815
1816         /*
1817          * Ensure there is enough space remaining to read the
1818          * event header, which contains the event's size.
1819          */
1820         if (head + sizeof(event->header) > mmap_size)
1821                 return NULL;
1822
1823         event = (union perf_event *)(buf + head);
1824
1825         if (session->header.needs_swap)
1826                 perf_event_header__bswap(&event->header);
1827
1828         if (head + event->header.size > mmap_size) {
1829                 /* We're not fetching the event so swap back again */
1830                 if (session->header.needs_swap)
1831                         perf_event_header__bswap(&event->header);
1832                 return NULL;
1833         }
1834
1835         return event;
1836 }
1837
1838 /*
1839  * On 64bit we can mmap the data file in one go. No need for tiny mmap
1840  * slices. On 32bit we use 32MB.
1841  */
1842 #if BITS_PER_LONG == 64
1843 #define MMAP_SIZE ULLONG_MAX
1844 #define NUM_MMAPS 1
1845 #else
1846 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1847 #define NUM_MMAPS 128
1848 #endif
1849
1850 struct reader {
1851         int     fd;
1852         u64     data_size;
1853         u64     data_offset;
1854 };
1855
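     /*
      * Walk the data section by mmapping it in windows (a single window
      * covering everything on 64-bit).  Up to NUM_MMAPS windows are kept
      * mapped at once, so events still sitting in the ordered queue, which
      * reference the mapped data rather than copies of it, stay readable
      * after the reader has moved on to a later window.
      */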
1856 static int
1857 reader__process_events(struct reader *rd, struct perf_session *session,
1858                        struct ui_progress *prog)
1859 {
1860         u64 data_size = rd->data_size;
1861         u64 head, page_offset, file_offset, file_pos, size;
1862         int err = 0, mmap_prot, mmap_flags, map_idx = 0;
1863         size_t  mmap_size;
1864         char *buf, *mmaps[NUM_MMAPS];
1865         union perf_event *event;
1866         s64 skip;
1867
1868         page_offset = page_size * (rd->data_offset / page_size);
1869         file_offset = page_offset;
1870         head = rd->data_offset - page_offset;
1871
1872         ui_progress__init_size(prog, data_size, "Processing events...");
1873
1874         data_size += rd->data_offset;
1875
1876         mmap_size = MMAP_SIZE;
1877         if (mmap_size > data_size) {
1878                 mmap_size = data_size;
1879                 session->one_mmap = true;
1880         }
1881
1882         memset(mmaps, 0, sizeof(mmaps));
1883
1884         mmap_prot  = PROT_READ;
1885         mmap_flags = MAP_SHARED;
1886
1887         if (session->header.needs_swap) {
1888                 mmap_prot  |= PROT_WRITE;
1889                 mmap_flags = MAP_PRIVATE;
1890         }
1891 remap:
1892         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
1893                    file_offset);
1894         if (buf == MAP_FAILED) {
1895                 pr_err("failed to mmap file\n");
1896                 err = -errno;
1897                 goto out;
1898         }
1899         mmaps[map_idx] = buf;
1900         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1901         file_pos = file_offset + head;
1902         if (session->one_mmap) {
1903                 session->one_mmap_addr = buf;
1904                 session->one_mmap_offset = file_offset;
1905         }
1906
1907 more:
1908         event = fetch_mmaped_event(session, head, mmap_size, buf);
1909         if (!event) {
1910                 if (mmaps[map_idx]) {
1911                         munmap(mmaps[map_idx], mmap_size);
1912                         mmaps[map_idx] = NULL;
1913                 }
1914
1915                 page_offset = page_size * (head / page_size);
1916                 file_offset += page_offset;
1917                 head -= page_offset;
1918                 goto remap;
1919         }
1920
1921         size = event->header.size;
1922
1923         if (size < sizeof(struct perf_event_header) ||
1924             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1925                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1926                        file_offset + head, event->header.size,
1927                        event->header.type);
1928                 err = -EINVAL;
1929                 goto out;
1930         }
1931
1932         if (skip)
1933                 size += skip;
1934
1935         head += size;
1936         file_pos += size;
1937
1938         ui_progress__update(prog, size);
1939
1940         if (session_done())
1941                 goto out;
1942
1943         if (file_pos < data_size)
1944                 goto more;
1945
1946 out:
1947         return err;
1948 }
1949
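     /*
      * Process an on-disk perf.data file: hand the data section to the
      * reader, then do the final flushes (ordered events, auxtrace events,
      * per-thread call stacks) and reinitialize the ordered-events queue
      * in case the session processes another output file.
      */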
1950 static int __perf_session__process_events(struct perf_session *session)
1951 {
1952         struct reader rd = {
1953                 .fd             = perf_data__fd(session->data),
1954                 .data_size      = session->header.data_size,
1955                 .data_offset    = session->header.data_offset,
1956         };
1957         struct ordered_events *oe = &session->ordered_events;
1958         struct perf_tool *tool = session->tool;
1959         struct ui_progress prog;
1960         int err;
1961
1962         perf_tool__fill_defaults(tool);
1963
1964         if (rd.data_size == 0)
1965                 return -1;
1966
1967         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
1968
1969         err = reader__process_events(&rd, session, &prog);
1970         if (err)
1971                 goto out_err;
1972         /* do the final flush for ordered samples */
1973         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1974         if (err)
1975                 goto out_err;
1976         err = auxtrace__flush_events(session, tool);
1977         if (err)
1978                 goto out_err;
1979         err = perf_session__flush_thread_stacks(session);
1980 out_err:
1981         ui_progress__finish();
1982         if (!tool->no_warn)
1983                 perf_session__warn_about_errors(session);
1984         /*
1985          * We may be switching perf.data output, so make ordered_events
1986          * reusable.
1987          */
1988         ordered_events__reinit(&session->ordered_events);
1989         auxtrace__free_events(session);
1990         session->one_mmap = false;
1991         return err;
1992 }
1993
1994 int perf_session__process_events(struct perf_session *session)
1995 {
1996         if (perf_session__register_idle_thread(session) < 0)
1997                 return -ENOMEM;
1998
1999         if (perf_data__is_pipe(session->data))
2000                 return __perf_session__process_pipe_events(session);
2001
2002         return __perf_session__process_events(session);
2003 }
2004
2005 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2006 {
2007         struct perf_evsel *evsel;
2008
2009         evlist__for_each_entry(session->evlist, evsel) {
2010                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
2011                         return true;
2012         }
2013
2014         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2015         return false;
2016 }
2017
2018 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2019 {
2020         char *bracket;
2021         struct ref_reloc_sym *ref;
2022         struct kmap *kmap;
2023
2024         ref = zalloc(sizeof(struct ref_reloc_sym));
2025         if (ref == NULL)
2026                 return -ENOMEM;
2027
2028         ref->name = strdup(symbol_name);
2029         if (ref->name == NULL) {
2030                 free(ref);
2031                 return -ENOMEM;
2032         }
2033
2034         bracket = strchr(ref->name, ']');
2035         if (bracket)
2036                 *bracket = '\0';
2037
2038         ref->addr = addr;
2039
2040         kmap = map__kmap(map);
2041         if (kmap)
2042                 kmap->ref_reloc_sym = ref;
2043
2044         return 0;
2045 }
2046
2047 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2048 {
2049         return machines__fprintf_dsos(&session->machines, fp);
2050 }
2051
2052 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2053                                           bool (skip)(struct dso *dso, int parm), int parm)
2054 {
2055         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2056 }
2057
2058 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2059 {
2060         size_t ret;
2061         const char *msg = "";
2062
2063         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2064                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2065
2066         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2067
2068         ret += events_stats__fprintf(&session->evlist->stats, fp);
2069         return ret;
2070 }
2071
2072 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2073 {
2074         /*
2075          * FIXME: Here we have to actually print all the machines in this
2076          * session, not just the host...
2077          */
2078         return machine__fprintf(&session->machines.host, fp);
2079 }
2080
2081 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2082                                               unsigned int type)
2083 {
2084         struct perf_evsel *pos;
2085
2086         evlist__for_each_entry(session->evlist, pos) {
2087                 if (pos->attr.type == type)
2088                         return pos;
2089         }
2090         return NULL;
2091 }
2092
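     /*
      * Build a bitmap of the CPUs named in 'cpu_list', first checking that
      * every event type present in the session sampled PERF_SAMPLE_CPU,
      * since filtering by CPU is impossible otherwise.
      */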
2093 int perf_session__cpu_bitmap(struct perf_session *session,
2094                              const char *cpu_list, unsigned long *cpu_bitmap)
2095 {
2096         int i, err = -1;
2097         struct cpu_map *map;
2098
2099         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2100                 struct perf_evsel *evsel;
2101
2102                 evsel = perf_session__find_first_evtype(session, i);
2103                 if (!evsel)
2104                         continue;
2105
2106                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2107                         pr_err("File does not contain CPU events. "
2108                                "Remove -C option to proceed.\n");
2109                         return -1;
2110                 }
2111         }
2112
2113         map = cpu_map__new(cpu_list);
2114         if (map == NULL) {
2115                 pr_err("Invalid cpu_list\n");
2116                 return -1;
2117         }
2118
2119         for (i = 0; i < map->nr; i++) {
2120                 int cpu = map->map[i];
2121
2122                 if (cpu >= MAX_NR_CPUS) {
2123                         pr_err("Requested CPU %d too large. "
2124                                "Consider raising MAX_NR_CPUS\n", cpu);
2125                         goto out_delete_map;
2126                 }
2127
2128                 set_bit(cpu, cpu_bitmap);
2129         }
2130
2131         err = 0;
2132
2133 out_delete_map:
2134         cpu_map__put(map);
2135         return err;
2136 }
2137
2138 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2139                                 bool full)
2140 {
2141         if (session == NULL || fp == NULL)
2142                 return;
2143
2144         fprintf(fp, "# ========\n");
2145         perf_header__fprintf_info(session, fp, full);
2146         fprintf(fp, "# ========\n#\n");
2147 }
2148
2149
2150 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2151                                              const struct perf_evsel_str_handler *assocs,
2152                                              size_t nr_assocs)
2153 {
2154         struct perf_evsel *evsel;
2155         size_t i;
2156         int err;
2157
2158         for (i = 0; i < nr_assocs; i++) {
2159                 /*
2160                  * If asked to add a handler for an event that is not in
2161                  * the session, just ignore it.
2162                  */
2163                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2164                 if (evsel == NULL)
2165                         continue;
2166
2167                 err = -EEXIST;
2168                 if (evsel->handler != NULL)
2169                         goto out;
2170                 evsel->handler = assocs[i].handler;
2171         }
2172
2173         err = 0;
2174 out:
2175         return err;
2176 }
2177
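     /*
      * Apply an ID index record: for each entry, look up the matching
      * perf_sample_id in the evlist and fill in its idx, cpu and tid.  The
      * advertised entry count is validated against the record size first.
      */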
2178 int perf_event__process_id_index(struct perf_session *session,
2179                                  union perf_event *event)
2180 {
2181         struct perf_evlist *evlist = session->evlist;
2182         struct id_index_event *ie = &event->id_index;
2183         size_t i, nr, max_nr;
2184
2185         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2186                  sizeof(struct id_index_entry);
2187         nr = ie->nr;
2188         if (nr > max_nr)
2189                 return -EINVAL;
2190
2191         if (dump_trace)
2192                 fprintf(stdout, " nr: %zu\n", nr);
2193
2194         for (i = 0; i < nr; i++) {
2195                 struct id_index_entry *e = &ie->entries[i];
2196                 struct perf_sample_id *sid;
2197
2198                 if (dump_trace) {
2199                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2200                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2201                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2202                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2203                 }
2204
2205                 sid = perf_evlist__id2sid(evlist, e->id);
2206                 if (!sid)
2207                         return -ENOENT;
2208                 sid->idx = e->idx;
2209                 sid->cpu = e->cpu;
2210                 sid->tid = e->tid;
2211         }
2212         return 0;
2213 }
2214
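     /*
      * Synthesize an ID index covering every id of every evsel.  Since the
      * 16-bit header size field caps how many entries fit in one record,
      * entries are emitted in chunks of at most 'max_nr': each full chunk
      * is flushed through 'process', and the final record is resized down
      * to the leftover entries.
      */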
2215 int perf_event__synthesize_id_index(struct perf_tool *tool,
2216                                     perf_event__handler_t process,
2217                                     struct perf_evlist *evlist,
2218                                     struct machine *machine)
2219 {
2220         union perf_event *ev;
2221         struct perf_evsel *evsel;
2222         size_t nr = 0, i = 0, sz, max_nr, n;
2223         int err;
2224
2225         pr_debug2("Synthesizing id index\n");
2226
2227         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2228                  sizeof(struct id_index_entry);
2229
2230         evlist__for_each_entry(evlist, evsel)
2231                 nr += evsel->ids;
2232
2233         n = nr > max_nr ? max_nr : nr;
2234         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2235         ev = zalloc(sz);
2236         if (!ev)
2237                 return -ENOMEM;
2238
2239         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2240         ev->id_index.header.size = sz;
2241         ev->id_index.nr = n;
2242
2243         evlist__for_each_entry(evlist, evsel) {
2244                 u32 j;
2245
2246                 for (j = 0; j < evsel->ids; j++) {
2247                         struct id_index_entry *e;
2248                         struct perf_sample_id *sid;
2249
2250                         if (i >= n) {
2251                                 err = process(tool, ev, NULL, machine);
2252                                 if (err)
2253                                         goto out_err;
2254                                 nr -= n;
2255                                 i = 0;
2256                         }
2257
2258                         e = &ev->id_index.entries[i++];
2259
2260                         e->id = evsel->id[j];
2261
2262                         sid = perf_evlist__id2sid(evlist, e->id);
2263                         if (!sid) {
2264                                 free(ev);
2265                                 return -ENOENT;
2266                         }
2267
2268                         e->idx = sid->idx;
2269                         e->cpu = sid->cpu;
2270                         e->tid = sid->tid;
2271                 }
2272         }
2273
2274         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2275         ev->id_index.header.size = sz;
2276         ev->id_index.nr = nr;
2277
2278         err = process(tool, ev, NULL, machine);
2279 out_err:
2280         free(ev);
2281
2282         return err;
2283 }