]> asedeno.scripts.mit.edu Git - linux.git/blob - tools/perf/util/header.c
perf tools: Add cpu_topology object
[linux.git] / tools / perf / util / header.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include "util.h"
5 #include "string2.h"
6 #include <sys/param.h>
7 #include <sys/types.h>
8 #include <byteswap.h>
9 #include <unistd.h>
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <linux/compiler.h>
13 #include <linux/list.h>
14 #include <linux/kernel.h>
15 #include <linux/bitops.h>
16 #include <linux/stringify.h>
17 #include <sys/stat.h>
18 #include <sys/utsname.h>
19 #include <linux/time64.h>
20 #include <dirent.h>
21
22 #include "evlist.h"
23 #include "evsel.h"
24 #include "header.h"
25 #include "memswap.h"
26 #include "../perf.h"
27 #include "trace-event.h"
28 #include "session.h"
29 #include "symbol.h"
30 #include "debug.h"
31 #include "cpumap.h"
32 #include "pmu.h"
33 #include "vdso.h"
34 #include "strbuf.h"
35 #include "build-id.h"
36 #include "data.h"
37 #include <api/fs/fs.h>
38 #include "asm/bug.h"
39 #include "tool.h"
40 #include "time-utils.h"
41 #include "units.h"
42 #include "cputopo.h"
43
44 #include "sane_ctype.h"
45
/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL; /* "PERFILE2" seen little-endian */
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; /* byte-swapped __perf_magic2 */

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;
62
/* On-disk attribute record: the event attr plus the file section holding its ids. */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

/*
 * I/O cursor for feature headers: operations go either to a file
 * descriptor or to a growable in-memory buffer (pipe mode).
 */
struct feat_fd {
	struct perf_header	*ph;	/* header being read/written */
	int			fd;	/* destination fd when unbuffered */
	void			*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t			offset;	/* current position within buf */
	size_t			size;	/* allocated size of buf */
	struct perf_evsel	*events;	/* NOTE(review): presumably evsels parsed by readers — confirm */
};
76
/* Mark feature @feat as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

/* Remove feature @feat from the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

/* Return true if feature @feat is set in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
91
92 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
93 {
94         ssize_t ret = writen(ff->fd, buf, size);
95
96         if (ret != (ssize_t)size)
97                 return ret < 0 ? (int)ret : -1;
98         return 0;
99 }
100
101 static int __do_write_buf(struct feat_fd *ff,  const void *buf, size_t size)
102 {
103         /* struct perf_event_header::size is u16 */
104         const size_t max_size = 0xffff - sizeof(struct perf_event_header);
105         size_t new_size = ff->size;
106         void *addr;
107
108         if (size + ff->offset > max_size)
109                 return -E2BIG;
110
111         while (size > (new_size - ff->offset))
112                 new_size <<= 1;
113         new_size = min(max_size, new_size);
114
115         if (ff->size < new_size) {
116                 addr = realloc(ff->buf, new_size);
117                 if (!addr)
118                         return -ENOMEM;
119                 ff->buf = addr;
120                 ff->size = new_size;
121         }
122
123         memcpy(ff->buf + ff->offset, buf, size);
124         ff->offset += size;
125
126         return 0;
127 }
128
129 /* Return: 0 if succeded, -ERR if failed. */
130 int do_write(struct feat_fd *ff, const void *buf, size_t size)
131 {
132         if (!ff->buf)
133                 return __do_write_fd(ff, buf, size);
134         return __do_write_buf(ff, buf, size);
135 }
136
137 /* Return: 0 if succeded, -ERR if failed. */
138 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
139 {
140         u64 *p = (u64 *) set;
141         int i, ret;
142
143         ret = do_write(ff, &size, sizeof(size));
144         if (ret < 0)
145                 return ret;
146
147         for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
148                 ret = do_write(ff, p + i, sizeof(*p));
149                 if (ret < 0)
150                         return ret;
151         }
152
153         return 0;
154 }
155
156 /* Return: 0 if succeded, -ERR if failed. */
157 int write_padded(struct feat_fd *ff, const void *bf,
158                  size_t count, size_t count_aligned)
159 {
160         static const char zero_buf[NAME_ALIGN];
161         int err = do_write(ff, bf, count);
162
163         if (!err)
164                 err = do_write(ff, zero_buf, count_aligned - count);
165
166         return err;
167 }
168
169 #define string_size(str)                                                \
170         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
171
172 /* Return: 0 if succeded, -ERR if failed. */
173 static int do_write_string(struct feat_fd *ff, const char *str)
174 {
175         u32 len, olen;
176         int ret;
177
178         olen = strlen(str) + 1;
179         len = PERF_ALIGN(olen, NAME_ALIGN);
180
181         /* write len, incl. \0 */
182         ret = do_write(ff, &len, sizeof(len));
183         if (ret < 0)
184                 return ret;
185
186         return write_padded(ff, str, olen, len);
187 }
188
189 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
190 {
191         ssize_t ret = readn(ff->fd, addr, size);
192
193         if (ret != size)
194                 return ret < 0 ? (int)ret : -1;
195         return 0;
196 }
197
198 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
199 {
200         if (size > (ssize_t)ff->size - ff->offset)
201                 return -1;
202
203         memcpy(addr, ff->buf + ff->offset, size);
204         ff->offset += size;
205
206         return 0;
207
208 }
209
210 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
211 {
212         if (!ff->buf)
213                 return __do_read_fd(ff, addr, size);
214         return __do_read_buf(ff, addr, size);
215 }
216
217 static int do_read_u32(struct feat_fd *ff, u32 *addr)
218 {
219         int ret;
220
221         ret = __do_read(ff, addr, sizeof(*addr));
222         if (ret)
223                 return ret;
224
225         if (ff->ph->needs_swap)
226                 *addr = bswap_32(*addr);
227         return 0;
228 }
229
230 static int do_read_u64(struct feat_fd *ff, u64 *addr)
231 {
232         int ret;
233
234         ret = __do_read(ff, addr, sizeof(*addr));
235         if (ret)
236                 return ret;
237
238         if (ff->ph->needs_swap)
239                 *addr = bswap_64(*addr);
240         return 0;
241 }
242
243 static char *do_read_string(struct feat_fd *ff)
244 {
245         u32 len;
246         char *buf;
247
248         if (do_read_u32(ff, &len))
249                 return NULL;
250
251         buf = malloc(len);
252         if (!buf)
253                 return NULL;
254
255         if (!__do_read(ff, buf, len)) {
256                 /*
257                  * strings are padded by zeroes
258                  * thus the actual strlen of buf
259                  * may be less than len
260                  */
261                 return buf;
262         }
263
264         free(buf);
265         return NULL;
266 }
267
/* Return: 0 if succeded, -ERR if failed. */
/*
 * Read back a bitmap written by do_write_bitmap(): a u64 bit count
 * followed by BITS_TO_U64(size) raw u64 words.  On success, ownership
 * of the freshly allocated bitmap transfers to the caller via *pset.
 *
 * NOTE(review): bitmap_alloc(size) presumably sizes the buffer in longs;
 * writing BITS_TO_U64(size) u64 words on a 32-bit host could overrun
 * when size is not a multiple of 64 — confirm bitmap_alloc() rounds up.
 */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			/* partial read: discard the allocation */
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}
297
/*
 * TRACING_DATA only works when writing to a real fd: read_tracing_data()
 * streams straight into ff->fd, so buffered (pipe) mode is refused.
 */
static int write_tracing_data(struct feat_fd *ff,
			      struct perf_evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->entries);
}
306
307 static int write_build_id(struct feat_fd *ff,
308                           struct perf_evlist *evlist __maybe_unused)
309 {
310         struct perf_session *session;
311         int err;
312
313         session = container_of(ff->ph, struct perf_session, header);
314
315         if (!perf_session__read_build_ids(session, true))
316                 return -1;
317
318         if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
319                 return -1;
320
321         err = perf_session__write_buildid_table(session, ff);
322         if (err < 0) {
323                 pr_debug("failed to write buildid table\n");
324                 return err;
325         }
326         perf_session__cache_build_ids(session);
327
328         return 0;
329 }
330
331 static int write_hostname(struct feat_fd *ff,
332                           struct perf_evlist *evlist __maybe_unused)
333 {
334         struct utsname uts;
335         int ret;
336
337         ret = uname(&uts);
338         if (ret < 0)
339                 return -1;
340
341         return do_write_string(ff, uts.nodename);
342 }
343
344 static int write_osrelease(struct feat_fd *ff,
345                            struct perf_evlist *evlist __maybe_unused)
346 {
347         struct utsname uts;
348         int ret;
349
350         ret = uname(&uts);
351         if (ret < 0)
352                 return -1;
353
354         return do_write_string(ff, uts.release);
355 }
356
357 static int write_arch(struct feat_fd *ff,
358                       struct perf_evlist *evlist __maybe_unused)
359 {
360         struct utsname uts;
361         int ret;
362
363         ret = uname(&uts);
364         if (ret < 0)
365                 return -1;
366
367         return do_write_string(ff, uts.machine);
368 }
369
370 static int write_version(struct feat_fd *ff,
371                          struct perf_evlist *evlist __maybe_unused)
372 {
373         return do_write_string(ff, perf_version_string);
374 }
375
/*
 * Find the first /proc/cpuinfo line whose key matches @cpuinfo_proc
 * (e.g. "model name"), strip the "key: " prefix and trailing newline,
 * squash runs of whitespace, and write the value as a string feature.
 * Returns do_write_string()'s result, or -1 when the key is missing or
 * /proc/cpuinfo cannot be opened.
 */
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	/* scan for the first line starting with the search key */
	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* skip past "key: " to the value when the separator is present */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			/* collapse the run of whitespace after p into one space */
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
432
433 static int write_cpudesc(struct feat_fd *ff,
434                        struct perf_evlist *evlist __maybe_unused)
435 {
436         const char *cpuinfo_procs[] = CPUINFO_PROC;
437         unsigned int i;
438
439         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
440                 int ret;
441                 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
442                 if (ret >= 0)
443                         return ret;
444         }
445         return -1;
446 }
447
448
449 static int write_nrcpus(struct feat_fd *ff,
450                         struct perf_evlist *evlist __maybe_unused)
451 {
452         long nr;
453         u32 nrc, nra;
454         int ret;
455
456         nrc = cpu__max_present_cpu();
457
458         nr = sysconf(_SC_NPROCESSORS_ONLN);
459         if (nr < 0)
460                 return -1;
461
462         nra = (u32)(nr & UINT_MAX);
463
464         ret = do_write(ff, &nrc, sizeof(nrc));
465         if (ret < 0)
466                 return ret;
467
468         return do_write(ff, &nra, sizeof(nra));
469 }
470
471 static int write_event_desc(struct feat_fd *ff,
472                             struct perf_evlist *evlist)
473 {
474         struct perf_evsel *evsel;
475         u32 nre, nri, sz;
476         int ret;
477
478         nre = evlist->nr_entries;
479
480         /*
481          * write number of events
482          */
483         ret = do_write(ff, &nre, sizeof(nre));
484         if (ret < 0)
485                 return ret;
486
487         /*
488          * size of perf_event_attr struct
489          */
490         sz = (u32)sizeof(evsel->attr);
491         ret = do_write(ff, &sz, sizeof(sz));
492         if (ret < 0)
493                 return ret;
494
495         evlist__for_each_entry(evlist, evsel) {
496                 ret = do_write(ff, &evsel->attr, sz);
497                 if (ret < 0)
498                         return ret;
499                 /*
500                  * write number of unique id per event
501                  * there is one id per instance of an event
502                  *
503                  * copy into an nri to be independent of the
504                  * type of ids,
505                  */
506                 nri = evsel->ids;
507                 ret = do_write(ff, &nri, sizeof(nri));
508                 if (ret < 0)
509                         return ret;
510
511                 /*
512                  * write event string as passed on cmdline
513                  */
514                 ret = do_write_string(ff, perf_evsel__name(evsel));
515                 if (ret < 0)
516                         return ret;
517                 /*
518                  * write unique ids for this event
519                  */
520                 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
521                 if (ret < 0)
522                         return ret;
523         }
524         return 0;
525 }
526
527 static int write_cmdline(struct feat_fd *ff,
528                          struct perf_evlist *evlist __maybe_unused)
529 {
530         char buf[MAXPATHLEN];
531         u32 n;
532         int i, ret;
533
534         /* actual path to perf binary */
535         ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
536         if (ret <= 0)
537                 return -1;
538
539         /* readlink() does not add null termination */
540         buf[ret] = '\0';
541
542         /* account for binary path */
543         n = perf_env.nr_cmdline + 1;
544
545         ret = do_write(ff, &n, sizeof(n));
546         if (ret < 0)
547                 return ret;
548
549         ret = do_write_string(ff, buf);
550         if (ret < 0)
551                 return ret;
552
553         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
554                 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
555                 if (ret < 0)
556                         return ret;
557         }
558         return 0;
559 }
560
561
562 static int write_cpu_topology(struct feat_fd *ff,
563                               struct perf_evlist *evlist __maybe_unused)
564 {
565         struct cpu_topology *tp;
566         u32 i;
567         int ret, j;
568
569         tp = cpu_topology__new();
570         if (!tp)
571                 return -1;
572
573         ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
574         if (ret < 0)
575                 goto done;
576
577         for (i = 0; i < tp->core_sib; i++) {
578                 ret = do_write_string(ff, tp->core_siblings[i]);
579                 if (ret < 0)
580                         goto done;
581         }
582         ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
583         if (ret < 0)
584                 goto done;
585
586         for (i = 0; i < tp->thread_sib; i++) {
587                 ret = do_write_string(ff, tp->thread_siblings[i]);
588                 if (ret < 0)
589                         break;
590         }
591
592         ret = perf_env__read_cpu_topology_map(&perf_env);
593         if (ret < 0)
594                 goto done;
595
596         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
597                 ret = do_write(ff, &perf_env.cpu[j].core_id,
598                                sizeof(perf_env.cpu[j].core_id));
599                 if (ret < 0)
600                         return ret;
601                 ret = do_write(ff, &perf_env.cpu[j].socket_id,
602                                sizeof(perf_env.cpu[j].socket_id));
603                 if (ret < 0)
604                         return ret;
605         }
606 done:
607         cpu_topology__delete(tp);
608         return ret;
609 }
610
611
612
613 static int write_total_mem(struct feat_fd *ff,
614                            struct perf_evlist *evlist __maybe_unused)
615 {
616         char *buf = NULL;
617         FILE *fp;
618         size_t len = 0;
619         int ret = -1, n;
620         uint64_t mem;
621
622         fp = fopen("/proc/meminfo", "r");
623         if (!fp)
624                 return -1;
625
626         while (getline(&buf, &len, fp) > 0) {
627                 ret = strncmp(buf, "MemTotal:", 9);
628                 if (!ret)
629                         break;
630         }
631         if (!ret) {
632                 n = sscanf(buf, "%*s %"PRIu64, &mem);
633                 if (n == 1)
634                         ret = do_write(ff, &mem, sizeof(mem));
635         } else
636                 ret = -1;
637         free(buf);
638         fclose(fp);
639         return ret;
640 }
641
642 static int write_topo_node(struct feat_fd *ff, int node)
643 {
644         char str[MAXPATHLEN];
645         char field[32];
646         char *buf = NULL, *p;
647         size_t len = 0;
648         FILE *fp;
649         u64 mem_total, mem_free, mem;
650         int ret = -1;
651
652         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
653         fp = fopen(str, "r");
654         if (!fp)
655                 return -1;
656
657         while (getline(&buf, &len, fp) > 0) {
658                 /* skip over invalid lines */
659                 if (!strchr(buf, ':'))
660                         continue;
661                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
662                         goto done;
663                 if (!strcmp(field, "MemTotal:"))
664                         mem_total = mem;
665                 if (!strcmp(field, "MemFree:"))
666                         mem_free = mem;
667         }
668
669         fclose(fp);
670         fp = NULL;
671
672         ret = do_write(ff, &mem_total, sizeof(u64));
673         if (ret)
674                 goto done;
675
676         ret = do_write(ff, &mem_free, sizeof(u64));
677         if (ret)
678                 goto done;
679
680         ret = -1;
681         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
682
683         fp = fopen(str, "r");
684         if (!fp)
685                 goto done;
686
687         if (getline(&buf, &len, fp) <= 0)
688                 goto done;
689
690         p = strchr(buf, '\n');
691         if (p)
692                 *p = '\0';
693
694         ret = do_write_string(ff, buf);
695 done:
696         free(buf);
697         if (fp)
698                 fclose(fp);
699         return ret;
700 }
701
702 static int write_numa_topology(struct feat_fd *ff,
703                                struct perf_evlist *evlist __maybe_unused)
704 {
705         char *buf = NULL;
706         size_t len = 0;
707         FILE *fp;
708         struct cpu_map *node_map = NULL;
709         char *c;
710         u32 nr, i, j;
711         int ret = -1;
712
713         fp = fopen("/sys/devices/system/node/online", "r");
714         if (!fp)
715                 return -1;
716
717         if (getline(&buf, &len, fp) <= 0)
718                 goto done;
719
720         c = strchr(buf, '\n');
721         if (c)
722                 *c = '\0';
723
724         node_map = cpu_map__new(buf);
725         if (!node_map)
726                 goto done;
727
728         nr = (u32)node_map->nr;
729
730         ret = do_write(ff, &nr, sizeof(nr));
731         if (ret < 0)
732                 goto done;
733
734         for (i = 0; i < nr; i++) {
735                 j = (u32)node_map->map[i];
736                 ret = do_write(ff, &j, sizeof(j));
737                 if (ret < 0)
738                         break;
739
740                 ret = write_topo_node(ff, j);
741                 if (ret < 0)
742                         break;
743         }
744 done:
745         free(buf);
746         fclose(fp);
747         cpu_map__put(node_map);
748         return ret;
749 }
750
751 /*
752  * File format:
753  *
754  * struct pmu_mappings {
755  *      u32     pmu_num;
756  *      struct pmu_map {
757  *              u32     type;
758  *              char    name[];
759  *      }[pmu_num];
760  * };
761  */
762
763 static int write_pmu_mappings(struct feat_fd *ff,
764                               struct perf_evlist *evlist __maybe_unused)
765 {
766         struct perf_pmu *pmu = NULL;
767         u32 pmu_num = 0;
768         int ret;
769
770         /*
771          * Do a first pass to count number of pmu to avoid lseek so this
772          * works in pipe mode as well.
773          */
774         while ((pmu = perf_pmu__scan(pmu))) {
775                 if (!pmu->name)
776                         continue;
777                 pmu_num++;
778         }
779
780         ret = do_write(ff, &pmu_num, sizeof(pmu_num));
781         if (ret < 0)
782                 return ret;
783
784         while ((pmu = perf_pmu__scan(pmu))) {
785                 if (!pmu->name)
786                         continue;
787
788                 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
789                 if (ret < 0)
790                         return ret;
791
792                 ret = do_write_string(ff, pmu->name);
793                 if (ret < 0)
794                         return ret;
795         }
796
797         return 0;
798 }
799
800 /*
801  * File format:
802  *
803  * struct group_descs {
804  *      u32     nr_groups;
805  *      struct group_desc {
806  *              char    name[];
807  *              u32     leader_idx;
808  *              u32     nr_members;
809  *      }[nr_groups];
810  * };
811  */
812 static int write_group_desc(struct feat_fd *ff,
813                             struct perf_evlist *evlist)
814 {
815         u32 nr_groups = evlist->nr_groups;
816         struct perf_evsel *evsel;
817         int ret;
818
819         ret = do_write(ff, &nr_groups, sizeof(nr_groups));
820         if (ret < 0)
821                 return ret;
822
823         evlist__for_each_entry(evlist, evsel) {
824                 if (perf_evsel__is_group_leader(evsel) &&
825                     evsel->nr_members > 1) {
826                         const char *name = evsel->group_name ?: "{anon_group}";
827                         u32 leader_idx = evsel->idx;
828                         u32 nr_members = evsel->nr_members;
829
830                         ret = do_write_string(ff, name);
831                         if (ret < 0)
832                                 return ret;
833
834                         ret = do_write(ff, &leader_idx, sizeof(leader_idx));
835                         if (ret < 0)
836                                 return ret;
837
838                         ret = do_write(ff, &nr_members, sizeof(nr_members));
839                         if (ret < 0)
840                                 return ret;
841                 }
842         }
843         return 0;
844 }
845
/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be use to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	/* weak default: no per-arch id string available */
	return NULL;
}
856
857 /* Return zero when the cpuid from the mapfile.csv matches the
858  * cpuid string generated on this platform.
859  * Otherwise return non-zero.
860  */
861 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
862 {
863         regex_t re;
864         regmatch_t pmatch[1];
865         int match;
866
867         if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
868                 /* Warn unable to generate match particular string. */
869                 pr_info("Invalid regular expression %s\n", mapcpuid);
870                 return 1;
871         }
872
873         match = !regexec(&re, cpuid, 1, pmatch, 0);
874         regfree(&re);
875         if (match) {
876                 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
877
878                 /* Verify the entire string matched. */
879                 if (match_len == strlen(cpuid))
880                         return 0;
881         }
882         return 1;
883 }
884
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	/* -1 makes write_cpuid() skip the CPUID feature on this arch */
	return -1;
}
893
894 static int write_cpuid(struct feat_fd *ff,
895                        struct perf_evlist *evlist __maybe_unused)
896 {
897         char buffer[64];
898         int ret;
899
900         ret = get_cpuid(buffer, sizeof(buffer));
901         if (ret)
902                 return -1;
903
904         return do_write_string(ff, buffer);
905 }
906
/* BRANCH_STACK is a flag-only feature: presence matters, no payload. */
static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
912
913 static int write_auxtrace(struct feat_fd *ff,
914                           struct perf_evlist *evlist __maybe_unused)
915 {
916         struct perf_session *session;
917         int err;
918
919         if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
920                 return -1;
921
922         session = container_of(ff->ph, struct perf_session, header);
923
924         err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
925         if (err < 0)
926                 pr_err("Failed to write auxtrace index\n");
927         return err;
928 }
929
/* CLOCKID feature: the clock resolution (ns) stored in the env. */
static int write_clockid(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clockid_res_ns,
			sizeof(ff->ph->env.clockid_res_ns));
}
936
937 static int cpu_cache_level__sort(const void *a, const void *b)
938 {
939         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
940         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
941
942         return cache_a->level - cache_b->level;
943 }
944
945 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
946 {
947         if (a->level != b->level)
948                 return false;
949
950         if (a->line_size != b->line_size)
951                 return false;
952
953         if (a->sets != b->sets)
954                 return false;
955
956         if (a->ways != b->ways)
957                 return false;
958
959         if (strcmp(a->type, b->type))
960                 return false;
961
962         if (strcmp(a->size, b->size))
963                 return false;
964
965         if (strcmp(a->map, b->map))
966                 return false;
967
968         return true;
969 }
970
971 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
972 {
973         char path[PATH_MAX], file[PATH_MAX];
974         struct stat st;
975         size_t len;
976
977         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
978         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
979
980         if (stat(file, &st))
981                 return 1;
982
983         scnprintf(file, PATH_MAX, "%s/level", path);
984         if (sysfs__read_int(file, (int *) &cache->level))
985                 return -1;
986
987         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
988         if (sysfs__read_int(file, (int *) &cache->line_size))
989                 return -1;
990
991         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
992         if (sysfs__read_int(file, (int *) &cache->sets))
993                 return -1;
994
995         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
996         if (sysfs__read_int(file, (int *) &cache->ways))
997                 return -1;
998
999         scnprintf(file, PATH_MAX, "%s/type", path);
1000         if (sysfs__read_str(file, &cache->type, &len))
1001                 return -1;
1002
1003         cache->type[len] = 0;
1004         cache->type = rtrim(cache->type);
1005
1006         scnprintf(file, PATH_MAX, "%s/size", path);
1007         if (sysfs__read_str(file, &cache->size, &len)) {
1008                 free(cache->type);
1009                 return -1;
1010         }
1011
1012         cache->size[len] = 0;
1013         cache->size = rtrim(cache->size);
1014
1015         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1016         if (sysfs__read_str(file, &cache->map, &len)) {
1017                 free(cache->map);
1018                 free(cache->type);
1019                 return -1;
1020         }
1021
1022         cache->map[len] = 0;
1023         cache->map = rtrim(cache->map);
1024         return 0;
1025 }
1026
/* Print one cache level on one line: level, type, size, shared-cpu map. */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
1031
/*
 * Enumerate the distinct CPU cache levels of the system by probing each
 * configured CPU's sysfs cache index entries and de-duplicating identical
 * descriptions.  Fills @caches (at most @size entries) and stores the
 * count in *cntp.  Returns 0 on success, negative on error.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		/* probe index0..index9 until one is missing for this cpu */
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			/* err == 1: no such cache index, done with this cpu */
			if (err == 1)
				break;

			/* keep only descriptions not already collected */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1075
1076 #define MAX_CACHES 2000
1077
1078 static int write_cache(struct feat_fd *ff,
1079                        struct perf_evlist *evlist __maybe_unused)
1080 {
1081         struct cpu_cache_level caches[MAX_CACHES];
1082         u32 cnt = 0, i, version = 1;
1083         int ret;
1084
1085         ret = build_caches(caches, MAX_CACHES, &cnt);
1086         if (ret)
1087                 goto out;
1088
1089         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1090
1091         ret = do_write(ff, &version, sizeof(u32));
1092         if (ret < 0)
1093                 goto out;
1094
1095         ret = do_write(ff, &cnt, sizeof(u32));
1096         if (ret < 0)
1097                 goto out;
1098
1099         for (i = 0; i < cnt; i++) {
1100                 struct cpu_cache_level *c = &caches[i];
1101
1102                 #define _W(v)                                   \
1103                         ret = do_write(ff, &c->v, sizeof(u32)); \
1104                         if (ret < 0)                            \
1105                                 goto out;
1106
1107                 _W(level)
1108                 _W(line_size)
1109                 _W(sets)
1110                 _W(ways)
1111                 #undef _W
1112
1113                 #define _W(v)                                           \
1114                         ret = do_write_string(ff, (const char *) c->v); \
1115                         if (ret < 0)                                    \
1116                                 goto out;
1117
1118                 _W(type)
1119                 _W(size)
1120                 _W(map)
1121                 #undef _W
1122         }
1123
1124 out:
1125         for (i = 0; i < cnt; i++)
1126                 cpu_cache_level__free(&caches[i]);
1127         return ret;
1128 }
1129
/*
 * STAT feature carries no payload: the feature bit alone marks the
 * file as containing stat data.
 */
static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
1135
1136 static int write_sample_time(struct feat_fd *ff,
1137                              struct perf_evlist *evlist)
1138 {
1139         int ret;
1140
1141         ret = do_write(ff, &evlist->first_sample_time,
1142                        sizeof(evlist->first_sample_time));
1143         if (ret < 0)
1144                 return ret;
1145
1146         return do_write(ff, &evlist->last_sample_time,
1147                         sizeof(evlist->last_sample_time));
1148 }
1149
1150
1151 static int memory_node__read(struct memory_node *n, unsigned long idx)
1152 {
1153         unsigned int phys, size = 0;
1154         char path[PATH_MAX];
1155         struct dirent *ent;
1156         DIR *dir;
1157
1158 #define for_each_memory(mem, dir)                                       \
1159         while ((ent = readdir(dir)))                                    \
1160                 if (strcmp(ent->d_name, ".") &&                         \
1161                     strcmp(ent->d_name, "..") &&                        \
1162                     sscanf(ent->d_name, "memory%u", &mem) == 1)
1163
1164         scnprintf(path, PATH_MAX,
1165                   "%s/devices/system/node/node%lu",
1166                   sysfs__mountpoint(), idx);
1167
1168         dir = opendir(path);
1169         if (!dir) {
1170                 pr_warning("failed: cant' open memory sysfs data\n");
1171                 return -1;
1172         }
1173
1174         for_each_memory(phys, dir) {
1175                 size = max(phys, size);
1176         }
1177
1178         size++;
1179
1180         n->set = bitmap_alloc(size);
1181         if (!n->set) {
1182                 closedir(dir);
1183                 return -ENOMEM;
1184         }
1185
1186         n->node = idx;
1187         n->size = size;
1188
1189         rewinddir(dir);
1190
1191         for_each_memory(phys, dir) {
1192                 set_bit(phys, n->set);
1193         }
1194
1195         closedir(dir);
1196         return 0;
1197 }
1198
1199 static int memory_node__sort(const void *a, const void *b)
1200 {
1201         const struct memory_node *na = a;
1202         const struct memory_node *nb = b;
1203
1204         return na->node - nb->node;
1205 }
1206
1207 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1208 {
1209         char path[PATH_MAX];
1210         struct dirent *ent;
1211         DIR *dir;
1212         u64 cnt = 0;
1213         int ret = 0;
1214
1215         scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1216                   sysfs__mountpoint());
1217
1218         dir = opendir(path);
1219         if (!dir) {
1220                 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1221                           __func__, path);
1222                 return -1;
1223         }
1224
1225         while (!ret && (ent = readdir(dir))) {
1226                 unsigned int idx;
1227                 int r;
1228
1229                 if (!strcmp(ent->d_name, ".") ||
1230                     !strcmp(ent->d_name, ".."))
1231                         continue;
1232
1233                 r = sscanf(ent->d_name, "node%u", &idx);
1234                 if (r != 1)
1235                         continue;
1236
1237                 if (WARN_ONCE(cnt >= size,
1238                               "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1239                         return -1;
1240
1241                 ret = memory_node__read(&nodes[cnt++], idx);
1242         }
1243
1244         *cntp = cnt;
1245         closedir(dir);
1246
1247         if (!ret)
1248                 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1249
1250         return ret;
1251 }
1252
1253 #define MAX_MEMORY_NODES 2000
1254
1255 /*
1256  * The MEM_TOPOLOGY holds physical memory map for every
1257  * node in system. The format of data is as follows:
1258  *
1259  *  0 - version          | for future changes
1260  *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1261  * 16 - count            | number of nodes
1262  *
1263  * For each node we store map of physical indexes for
1264  * each node:
1265  *
1266  * 32 - node id          | node index
1267  * 40 - size             | size of bitmap
1268  * 48 - bitmap           | bitmap of memory indexes that belongs to node
1269  */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	/* static: MAX_MEMORY_NODES entries would be too large for the stack */
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	/* header: version, block size, node count (see layout comment above) */
	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		/* emit a fixed-size field, bailing out on the first error */
		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)

		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

	/*
	 * NOTE(review): the bitmaps allocated by memory_node__read() are
	 * not freed here — appears to be a one-shot leak; verify.
	 */
out:
	return ret;
}
1319
/* HOSTNAME feature */
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}
1324
/* OSRELEASE feature */
static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}
1329
/* ARCH feature */
static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}
1334
/* CPUDESC feature */
static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}
1339
/* NRCPUS feature: online and available CPU counts */
static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}
1345
/* VERSION feature: version of the perf that wrote the file */
static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}
1350
/*
 * Print the recorded perf command line.  Embedded single quotes are
 * escaped as \' so the line can be pasted back into a shell; if the
 * temporary copy cannot be allocated, the argument is printed
 * verbatim as a best-effort fallback.
 */
static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				/* text up to the quote, then the escaped quote */
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}
1379
/*
 * CPU_TOPOLOGY feature: sibling core/thread lists (stored as
 * consecutive NUL-terminated strings) and, when available, the
 * per-CPU core/socket ids.
 */
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores   : %s\n", str);
		str += strlen(str) + 1;	/* skip NUL, advance to next list */
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}
1410
/*
 * CLOCKID feature.
 * NOTE(review): env.clockid_res_ns is named as a clock *resolution* in
 * nanoseconds, yet it is multiplied by 1000 and labelled a frequency in
 * MHz — that conversion looks wrong; verify against the writer side.
 */
static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}
1416
1417 static void free_event_desc(struct perf_evsel *events)
1418 {
1419         struct perf_evsel *evsel;
1420
1421         if (!events)
1422                 return;
1423
1424         for (evsel = events; evsel->attr.size; evsel++) {
1425                 zfree(&evsel->name);
1426                 zfree(&evsel->id);
1427         }
1428
1429         free(events);
1430 }
1431
/*
 * Parse the EVENT_DESC feature payload:
 *   u32 nre -- number of events
 *   u32 sz  -- size of the on-file perf_event_attr
 * then, per event: sz raw attr bytes, u32 nr (id count), the event
 * name string, and nr u64 ids.  Returns a calloc'ed array terminated
 * by an entry with attr.size == 0, or NULL on error; the caller owns
 * it and frees it with free_event_desc().
 */
static struct perf_evsel *read_event_desc(struct feat_fd *ff)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy no more than our in-memory attr can hold */
	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
1509
1510 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1511                                 void *priv __maybe_unused)
1512 {
1513         return fprintf(fp, ", %s = %s", name, val);
1514 }
1515
/*
 * Print one "# event :" line per event from the EVENT_DESC feature.
 * In pipe mode the events were stashed on ff->events by
 * process_event_desc(); otherwise they are read from the file here.
 */
static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		/*
		 * NOTE(review): the trailing ", " here plus the leading ", "
		 * before "id = {" below prints a doubled comma — looks
		 * unintended; verify against expected output before changing.
		 */
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}
1553
/* TOTAL_MEM feature (the format string prints the value in kB) */
static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}
1558
1559 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1560 {
1561         int i;
1562         struct numa_node *n;
1563
1564         for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1565                 n = &ff->ph->env.numa_nodes[i];
1566
1567                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1568                             " free = %"PRIu64" kB\n",
1569                         n->node, n->mem_total, n->mem_free);
1570
1571                 fprintf(fp, "# node%u cpu list : ", n->node);
1572                 cpu_map__fprintf(n->map, fp);
1573         }
1574 }
1575
/* CPUID feature */
static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}
1580
/* BRANCH_STACK feature: presence-only, nothing stored in env */
static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}
1585
/* AUXTRACE feature: presence-only, nothing stored in env */
static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}
1590
/* STAT feature: presence-only, nothing stored in env */
static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}
1595
1596 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1597 {
1598         int i;
1599
1600         fprintf(fp, "# CPU cache info:\n");
1601         for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1602                 fprintf(fp, "#  ");
1603                 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1604         }
1605 }
1606
/*
 * PMU_MAPPINGS feature: env.pmu_mappings holds nr_pmu_mappings
 * entries, each a "<type>:" numeric prefix followed by a
 * NUL-terminated PMU name.
 */
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;	/* skip the name and its NUL */
		pmu_num--;
	}

	fprintf(fp, "\n");

	/* pmu_num is 0 here; error: is only reachable via the goto above */
	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}
1642
/*
 * GROUP_DESC feature: print event groups as "# group: leader{m1,m2}".
 * nr counts the members still to print for the group currently open;
 * the closing brace is emitted when it reaches zero.
 */
static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}
1666
1667 static void print_sample_time(struct feat_fd *ff, FILE *fp)
1668 {
1669         struct perf_session *session;
1670         char time_buf[32];
1671         double d;
1672
1673         session = container_of(ff->ph, struct perf_session, header);
1674
1675         timestamp__scnprintf_usec(session->evlist->first_sample_time,
1676                                   time_buf, sizeof(time_buf));
1677         fprintf(fp, "# time of first sample : %s\n", time_buf);
1678
1679         timestamp__scnprintf_usec(session->evlist->last_sample_time,
1680                                   time_buf, sizeof(time_buf));
1681         fprintf(fp, "# time of last sample : %s\n", time_buf);
1682
1683         d = (double)(session->evlist->last_sample_time -
1684                 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1685
1686         fprintf(fp, "# sample duration : %10.3f ms\n", d);
1687 }
1688
1689 static void memory_node__fprintf(struct memory_node *n,
1690                                  unsigned long long bsize, FILE *fp)
1691 {
1692         char buf_map[100], buf_size[50];
1693         unsigned long long size;
1694
1695         size = bsize * bitmap_weight(n->set, n->size);
1696         unit_number__scnprintf(buf_size, 50, size);
1697
1698         bitmap_scnprintf(n->set, n->size, buf_map, 100);
1699         fprintf(fp, "#  %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1700 }
1701
1702 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1703 {
1704         struct memory_node *nodes;
1705         int i, nr;
1706
1707         nodes = ff->ph->env.memory_nodes;
1708         nr    = ff->ph->env.nr_memory_nodes;
1709
1710         fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1711                 nr, ff->ph->env.memory_bsize);
1712
1713         for (i = 0; i < nr; i++) {
1714                 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1715         }
1716 }
1717
/*
 * Attach the build id carried by @bev to the dso named @filename in
 * the machine identified by bev->pid.  The cpumode bits of the event
 * header decide whether the dso is host kernel, guest kernel or user
 * code.  Returns 0 on success, -1 if the machine can't be created or
 * the cpumode is unrecognized.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			/* kernel modules get module info, others just the type */
			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
1777
/*
 * Read build-id records written in the pre-a1645ce1 layout, which
 * lacked the pid field.  The pid is reconstructed from header.misc
 * before each record is handed to __event_process_build_id().
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char                       filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		/*
		 * NOTE(review): no check that header.size >= sizeof(old_bev);
		 * a corrupt record would make len negative — verify whether
		 * the caller guarantees sane sizes.
		 */
		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value give us nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
1823
/*
 * Read the build-id table from [offset, offset + size) of @input and
 * feed each record to __event_process_build_id().  If the well-known
 * kernel entry looks shifted by 4 bytes, the table predates the pid
 * field and the whole range is re-read with the ABI-quirk reader.
 * Returns 0 on success, -1 on a short read or seek failure.
 */
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
1872
/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

/* Generate process_<feature>() readers for the simple string features. */
FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
1887
/* TRACING_DATA feature: hand the fd straight to trace_report(). */
static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}
1894
/* BUILD_ID feature: best effort — a read failure is logged, not fatal. */
static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}
1901
1902 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
1903 {
1904         int ret;
1905         u32 nr_cpus_avail, nr_cpus_online;
1906
1907         ret = do_read_u32(ff, &nr_cpus_avail);
1908         if (ret)
1909                 return ret;
1910
1911         ret = do_read_u32(ff, &nr_cpus_online);
1912         if (ret)
1913                 return ret;
1914         ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
1915         ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
1916         return 0;
1917 }
1918
1919 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
1920 {
1921         u64 total_mem;
1922         int ret;
1923
1924         ret = do_read_u64(ff, &total_mem);
1925         if (ret)
1926                 return -1;
1927         ff->ph->env.total_mem = (unsigned long long)total_mem;
1928         return 0;
1929 }
1930
/* Return the evsel whose ->idx matches @idx, or NULL if none does. */
static struct perf_evsel *
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}
1943
1944 static void
1945 perf_evlist__set_event_name(struct perf_evlist *evlist,
1946                             struct perf_evsel *event)
1947 {
1948         struct perf_evsel *evsel;
1949
1950         if (!event->name)
1951                 return;
1952
1953         evsel = perf_evlist__find_by_index(evlist, event->idx);
1954         if (!evsel)
1955                 return;
1956
1957         if (evsel->name)
1958                 return;
1959
1960         evsel->name = strdup(event->name);
1961 }
1962
/*
 * EVENT_DESC feature: propagate the recorded event names onto the
 * session's evlist.  In pipe mode the parsed events are kept on
 * ff->events for later printing, since the stream can't be re-read;
 * otherwise they are freed here.
 */
static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct perf_evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

	for (evsel = events; evsel->attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}
1988
/*
 * CMDLINE feature: a u32 count followed by that many strings.  All
 * arguments are packed into one allocation (cmdline) and exposed via
 * the argv-style pointer array env.cmdline_argv.
 */
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	/* ff->size covers the on-file strings; + nr + 1 leaves room for NULs */
	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* copy into the packed buffer, then drop the temporary */
		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}
2026
2027 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2028 {
2029         u32 nr, i;
2030         char *str;
2031         struct strbuf sb;
2032         int cpu_nr = ff->ph->env.nr_cpus_avail;
2033         u64 size = 0;
2034         struct perf_header *ph = ff->ph;
2035         bool do_core_id_test = true;
2036
2037         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2038         if (!ph->env.cpu)
2039                 return -1;
2040
2041         if (do_read_u32(ff, &nr))
2042                 goto free_cpu;
2043
2044         ph->env.nr_sibling_cores = nr;
2045         size += sizeof(u32);
2046         if (strbuf_init(&sb, 128) < 0)
2047                 goto free_cpu;
2048
2049         for (i = 0; i < nr; i++) {
2050                 str = do_read_string(ff);
2051                 if (!str)
2052                         goto error;
2053
2054                 /* include a NULL character at the end */
2055                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2056                         goto error;
2057                 size += string_size(str);
2058                 free(str);
2059         }
2060         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2061
2062         if (do_read_u32(ff, &nr))
2063                 return -1;
2064
2065         ph->env.nr_sibling_threads = nr;
2066         size += sizeof(u32);
2067
2068         for (i = 0; i < nr; i++) {
2069                 str = do_read_string(ff);
2070                 if (!str)
2071                         goto error;
2072
2073                 /* include a NULL character at the end */
2074                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2075                         goto error;
2076                 size += string_size(str);
2077                 free(str);
2078         }
2079         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2080
2081         /*
2082          * The header may be from old perf,
2083          * which doesn't include core id and socket id information.
2084          */
2085         if (ff->size <= size) {
2086                 zfree(&ph->env.cpu);
2087                 return 0;
2088         }
2089
2090         /* On s390 the socket_id number is not related to the numbers of cpus.
2091          * The socket_id number might be higher than the numbers of cpus.
2092          * This depends on the configuration.
2093          */
2094         if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
2095                 do_core_id_test = false;
2096
2097         for (i = 0; i < (u32)cpu_nr; i++) {
2098                 if (do_read_u32(ff, &nr))
2099                         goto free_cpu;
2100
2101                 ph->env.cpu[i].core_id = nr;
2102
2103                 if (do_read_u32(ff, &nr))
2104                         goto free_cpu;
2105
2106                 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2107                         pr_debug("socket_id number is too big."
2108                                  "You may need to upgrade the perf tool.\n");
2109                         goto free_cpu;
2110                 }
2111
2112                 ph->env.cpu[i].socket_id = nr;
2113         }
2114
2115         return 0;
2116
2117 error:
2118         strbuf_release(&sb);
2119 free_cpu:
2120         zfree(&ph->env.cpu);
2121         return -1;
2122 }
2123
2124 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2125 {
2126         struct numa_node *nodes, *n;
2127         u32 nr, i;
2128         char *str;
2129
2130         /* nr nodes */
2131         if (do_read_u32(ff, &nr))
2132                 return -1;
2133
2134         nodes = zalloc(sizeof(*nodes) * nr);
2135         if (!nodes)
2136                 return -ENOMEM;
2137
2138         for (i = 0; i < nr; i++) {
2139                 n = &nodes[i];
2140
2141                 /* node number */
2142                 if (do_read_u32(ff, &n->node))
2143                         goto error;
2144
2145                 if (do_read_u64(ff, &n->mem_total))
2146                         goto error;
2147
2148                 if (do_read_u64(ff, &n->mem_free))
2149                         goto error;
2150
2151                 str = do_read_string(ff);
2152                 if (!str)
2153                         goto error;
2154
2155                 n->map = cpu_map__new(str);
2156                 if (!n->map)
2157                         goto error;
2158
2159                 free(str);
2160         }
2161         ff->ph->env.nr_numa_nodes = nr;
2162         ff->ph->env.numa_nodes = nodes;
2163         return 0;
2164
2165 error:
2166         free(nodes);
2167         return -1;
2168 }
2169
2170 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2171 {
2172         char *name;
2173         u32 pmu_num;
2174         u32 type;
2175         struct strbuf sb;
2176
2177         if (do_read_u32(ff, &pmu_num))
2178                 return -1;
2179
2180         if (!pmu_num) {
2181                 pr_debug("pmu mappings not available\n");
2182                 return 0;
2183         }
2184
2185         ff->ph->env.nr_pmu_mappings = pmu_num;
2186         if (strbuf_init(&sb, 128) < 0)
2187                 return -1;
2188
2189         while (pmu_num) {
2190                 if (do_read_u32(ff, &type))
2191                         goto error;
2192
2193                 name = do_read_string(ff);
2194                 if (!name)
2195                         goto error;
2196
2197                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2198                         goto error;
2199                 /* include a NULL character at the end */
2200                 if (strbuf_add(&sb, "", 1) < 0)
2201                         goto error;
2202
2203                 if (!strcmp(name, "msr"))
2204                         ff->ph->env.msr_pmu_type = type;
2205
2206                 free(name);
2207                 pmu_num--;
2208         }
2209         ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2210         return 0;
2211
2212 error:
2213         strbuf_release(&sb);
2214         return -1;
2215 }
2216
2217 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2218 {
2219         size_t ret = -1;
2220         u32 i, nr, nr_groups;
2221         struct perf_session *session;
2222         struct perf_evsel *evsel, *leader = NULL;
2223         struct group_desc {
2224                 char *name;
2225                 u32 leader_idx;
2226                 u32 nr_members;
2227         } *desc;
2228
2229         if (do_read_u32(ff, &nr_groups))
2230                 return -1;
2231
2232         ff->ph->env.nr_groups = nr_groups;
2233         if (!nr_groups) {
2234                 pr_debug("group desc not available\n");
2235                 return 0;
2236         }
2237
2238         desc = calloc(nr_groups, sizeof(*desc));
2239         if (!desc)
2240                 return -1;
2241
2242         for (i = 0; i < nr_groups; i++) {
2243                 desc[i].name = do_read_string(ff);
2244                 if (!desc[i].name)
2245                         goto out_free;
2246
2247                 if (do_read_u32(ff, &desc[i].leader_idx))
2248                         goto out_free;
2249
2250                 if (do_read_u32(ff, &desc[i].nr_members))
2251                         goto out_free;
2252         }
2253
2254         /*
2255          * Rebuild group relationship based on the group_desc
2256          */
2257         session = container_of(ff->ph, struct perf_session, header);
2258         session->evlist->nr_groups = nr_groups;
2259
2260         i = nr = 0;
2261         evlist__for_each_entry(session->evlist, evsel) {
2262                 if (evsel->idx == (int) desc[i].leader_idx) {
2263                         evsel->leader = evsel;
2264                         /* {anon_group} is a dummy name */
2265                         if (strcmp(desc[i].name, "{anon_group}")) {
2266                                 evsel->group_name = desc[i].name;
2267                                 desc[i].name = NULL;
2268                         }
2269                         evsel->nr_members = desc[i].nr_members;
2270
2271                         if (i >= nr_groups || nr > 0) {
2272                                 pr_debug("invalid group desc\n");
2273                                 goto out_free;
2274                         }
2275
2276                         leader = evsel;
2277                         nr = evsel->nr_members - 1;
2278                         i++;
2279                 } else if (nr) {
2280                         /* This is a group member */
2281                         evsel->leader = leader;
2282
2283                         nr--;
2284                 }
2285         }
2286
2287         if (i != nr_groups || nr != 0) {
2288                 pr_debug("invalid group desc\n");
2289                 goto out_free;
2290         }
2291
2292         ret = 0;
2293 out_free:
2294         for (i = 0; i < nr_groups; i++)
2295                 zfree(&desc[i].name);
2296         free(desc);
2297
2298         return ret;
2299 }
2300
2301 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2302 {
2303         struct perf_session *session;
2304         int err;
2305
2306         session = container_of(ff->ph, struct perf_session, header);
2307
2308         err = auxtrace_index__process(ff->fd, ff->size, session,
2309                                       ff->ph->needs_swap);
2310         if (err < 0)
2311                 pr_err("Failed to process auxtrace index\n");
2312         return err;
2313 }
2314
2315 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2316 {
2317         struct cpu_cache_level *caches;
2318         u32 cnt, i, version;
2319
2320         if (do_read_u32(ff, &version))
2321                 return -1;
2322
2323         if (version != 1)
2324                 return -1;
2325
2326         if (do_read_u32(ff, &cnt))
2327                 return -1;
2328
2329         caches = zalloc(sizeof(*caches) * cnt);
2330         if (!caches)
2331                 return -1;
2332
2333         for (i = 0; i < cnt; i++) {
2334                 struct cpu_cache_level c;
2335
2336                 #define _R(v)                                           \
2337                         if (do_read_u32(ff, &c.v))\
2338                                 goto out_free_caches;                   \
2339
2340                 _R(level)
2341                 _R(line_size)
2342                 _R(sets)
2343                 _R(ways)
2344                 #undef _R
2345
2346                 #define _R(v)                                   \
2347                         c.v = do_read_string(ff);               \
2348                         if (!c.v)                               \
2349                                 goto out_free_caches;
2350
2351                 _R(type)
2352                 _R(size)
2353                 _R(map)
2354                 #undef _R
2355
2356                 caches[i] = c;
2357         }
2358
2359         ff->ph->env.caches = caches;
2360         ff->ph->env.caches_cnt = cnt;
2361         return 0;
2362 out_free_caches:
2363         free(caches);
2364         return -1;
2365 }
2366
2367 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2368 {
2369         struct perf_session *session;
2370         u64 first_sample_time, last_sample_time;
2371         int ret;
2372
2373         session = container_of(ff->ph, struct perf_session, header);
2374
2375         ret = do_read_u64(ff, &first_sample_time);
2376         if (ret)
2377                 return -1;
2378
2379         ret = do_read_u64(ff, &last_sample_time);
2380         if (ret)
2381                 return -1;
2382
2383         session->evlist->first_sample_time = first_sample_time;
2384         session->evlist->last_sample_time = last_sample_time;
2385         return 0;
2386 }
2387
2388 static int process_mem_topology(struct feat_fd *ff,
2389                                 void *data __maybe_unused)
2390 {
2391         struct memory_node *nodes;
2392         u64 version, i, nr, bsize;
2393         int ret = -1;
2394
2395         if (do_read_u64(ff, &version))
2396                 return -1;
2397
2398         if (version != 1)
2399                 return -1;
2400
2401         if (do_read_u64(ff, &bsize))
2402                 return -1;
2403
2404         if (do_read_u64(ff, &nr))
2405                 return -1;
2406
2407         nodes = zalloc(sizeof(*nodes) * nr);
2408         if (!nodes)
2409                 return -1;
2410
2411         for (i = 0; i < nr; i++) {
2412                 struct memory_node n;
2413
2414                 #define _R(v)                           \
2415                         if (do_read_u64(ff, &n.v))      \
2416                                 goto out;               \
2417
2418                 _R(node)
2419                 _R(size)
2420
2421                 #undef _R
2422
2423                 if (do_read_bitmap(ff, &n.set, &n.size))
2424                         goto out;
2425
2426                 nodes[i] = n;
2427         }
2428
2429         ff->ph->env.memory_bsize    = bsize;
2430         ff->ph->env.memory_nodes    = nodes;
2431         ff->ph->env.nr_memory_nodes = nr;
2432         ret = 0;
2433
2434 out:
2435         if (ret)
2436                 free(nodes);
2437         return ret;
2438 }
2439
2440 static int process_clockid(struct feat_fd *ff,
2441                            void *data __maybe_unused)
2442 {
2443         if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2444                 return -1;
2445
2446         return 0;
2447 }
2448
/*
 * Per-feature callbacks: how a header feature section is written to a
 * perf.data file, pretty-printed, and parsed back in.
 */
struct feature_ops {
	int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
	void (*print)(struct feat_fd *ff, FILE *fp);
	int (*process)(struct feat_fd *ff, void *data);
	const char *name;
	bool full_only;		/* printed only when the full view is requested (-I) */
	bool synthesize;	/* set via FEAT_OPR() — presumably "can be synthesized
				 * for pipe mode"; confirm against users of this flag */
};
2457
/*
 * FEAT_OPR: declare a feature with all callbacks and the synthesize flag
 * set.  FEAT_OPN: same, but without the synthesize flag.  The write/print/
 * process callbacks are derived from the `func` token by name pasting.
 */
#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL

2484
/* Dispatch table indexed by HEADER_* feature id; gaps stay zero-filled. */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false)
};
2510
/* Context threaded through perf_header__process_sections() when printing. */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2515
/*
 * Section callback for perf_header__fprintf_info(): seek to the feature's
 * section and invoke its print handler, or print a hint when the feature
 * is only shown in the full (-I) view.  Always returns 0 so printing
 * continues past unknown or unprintable features.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct  feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}
2548
/*
 * Print a human-readable summary of the perf.data header to @fp: capture
 * time, layout offsets, every present feature (via the per-feature print
 * callbacks) and, for non-pipe data, the list of missing features.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	/* copy st_ctime so ctime() gets a plain time_t regardless of arch */
	stctime = st.st_ctime;
	fprintf(fp, "# captured on    : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		/* bit 0 is skipped — presumably a reserved feature id with
		 * no name; confirm against the HEADER_* enum */
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
2588
/*
 * Write one feature section (if the feature bit is set) and record its
 * offset/size into *p, advancing *p on success.  On a failed write the
 * file offset is rewound to undo any partial output and -1 is returned —
 * the caller then clears the feature bit.  Not usable in pipe mode
 * (ff->buf set), since sections require seeking.
 */
static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		/* remember where this section's payload starts */
		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
2619
/*
 * Write all enabled feature sections after the data area: the payloads are
 * written first (leaving room for the section table at feat_offset), then
 * the table of (offset, size) entries is written back at feat_offset.
 * Features whose write fails are dropped from adds_features.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd  = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* skip over the section table; payloads go right after it */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
2665
2666 int perf_header__write_pipe(int fd)
2667 {
2668         struct perf_pipe_file_header f_header;
2669         struct feat_fd ff;
2670         int err;
2671
2672         ff = (struct feat_fd){ .fd = fd };
2673
2674         f_header = (struct perf_pipe_file_header){
2675                 .magic     = PERF_MAGIC,
2676                 .size      = sizeof(f_header),
2677         };
2678
2679         err = do_write(&ff, &f_header, sizeof(f_header));
2680         if (err < 0) {
2681                 pr_debug("failed to write perf pipe header\n");
2682                 return err;
2683         }
2684
2685         return 0;
2686 }
2687
/*
 * Write a full (non-pipe) perf.data header.  Layout, in write order:
 * sample ids, then the attr table, then (at exit only) the feature
 * sections past the data area, and finally the file header itself back at
 * offset 0.  @at_exit selects the final pass that also writes features.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	/* leave room at offset 0; the file header is written last */
	ff = (struct feat_fd){ .fd = fd};
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		/* remember where this evsel's ids start, for f_attr below */
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* now that all offsets are known, write the header at the front */
	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	/* leave the offset positioned past the data area for the caller */
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
2766
/*
 * Read @size bytes into @buf, byte-swapping as an array of u64s when the
 * header records the opposite endianness.
 *
 * NOTE(review): readn()'s result is only checked for <= 0; a short
 * (truncated) read smaller than @size would go unnoticed — confirm
 * readn()'s contract before relying on this.
 */
static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}
2778
/*
 * Read the feature section table from feat_offset and invoke @process for
 * every feature bit set in adds_features, passing each its section entry.
 * Stops at the first callback error and returns it; 0 on success.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	/* section count covers all on-disk bits (HEADER_FEAT_BITS) ... */
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/* ... but only features this perf knows about are processed */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
2817
/*
 * Known on-disk sizes of struct perf_event_attr across ABI revisions,
 * zero-terminated so try_all_file_abis() can walk the list.
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
2826
2827 /*
2828  * In the legacy file format, the magic number is not used to encode endianness.
2829  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2830  * on ABI revisions, we need to try all combinations for all endianness to
2831  * detect the endianness.
2832  */
2833 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2834 {
2835         uint64_t ref_size, attr_size;
2836         int i;
2837
2838         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2839                 ref_size = attr_file_abi_sizes[i]
2840                          + sizeof(struct perf_file_section);
2841                 if (hdr_sz != ref_size) {
2842                         attr_size = bswap_64(hdr_sz);
2843                         if (attr_size != ref_size)
2844                                 continue;
2845
2846                         ph->needs_swap = true;
2847                 }
2848                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2849                          i,
2850                          ph->needs_swap);
2851                 return 0;
2852         }
2853         /* could not determine endianness */
2854         return -1;
2855 }
2856
/* size of struct perf_pipe_file_header in the original (v0) pipe ABI */
#define PERF_PIPE_HDR_VER0	16

/* known pipe header sizes, zero-terminated for try_all_pipe_abis() */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
2863
2864 /*
2865  * In the legacy pipe format, there is an implicit assumption that endiannesss
2866  * between host recording the samples, and host parsing the samples is the
2867  * same. This is not always the case given that the pipe output may always be
2868  * redirected into a file and analyzed on a different machine with possibly a
2869  * different endianness and perf_event ABI revsions in the perf tool itself.
2870  */
2871 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2872 {
2873         u64 attr_size;
2874         int i;
2875
2876         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2877                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2878                         attr_size = bswap_64(hdr_sz);
2879                         if (attr_size != hdr_sz)
2880                                 continue;
2881
2882                         ph->needs_swap = true;
2883                 }
2884                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2885                 return 0;
2886         }
2887         return -1;
2888 }
2889
2890 bool is_perf_magic(u64 magic)
2891 {
2892         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2893                 || magic == __perf_magic2
2894                 || magic == __perf_magic2_sw)
2895                 return true;
2896
2897         return false;
2898 }
2899
/*
 * Identify the file format version and endianness from the magic value.
 * Legacy (PERFFILE) files fall back to size-based probing via the
 * try_all_*_abis() helpers; v2 (PERFILE2) files encode endianness in the
 * magic itself.  Sets ph->version and ph->needs_swap; returns 0 on
 * success, -1 if the magic is not recognized.
 */
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
2934
/*
 * Read and validate the on-disk perf.data file header from @fd, filling in
 * the in-memory @ph (endianness, feature bitmap copy, data offsets).
 * Returns 0 on success, -1 on short read or malformed header.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	/* the file header always lives at offset 0 */
	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	/* sets ph->version and ph->needs_swap from the magic */
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap all u64 fields up to (not including) the feature bitmap */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	/* feature sections are laid out immediately after the data section */
	ph->data_offset  = header->data.offset;
	ph->data_size    = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
3006
3007 static int perf_file_section__process(struct perf_file_section *section,
3008                                       struct perf_header *ph,
3009                                       int feat, int fd, void *data)
3010 {
3011         struct feat_fd fdd = {
3012                 .fd     = fd,
3013                 .ph     = ph,
3014                 .size   = section->size,
3015                 .offset = section->offset,
3016         };
3017
3018         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3019                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3020                           "%d, continuing...\n", section->offset, feat);
3021                 return 0;
3022         }
3023
3024         if (feat >= HEADER_LAST_FEATURE) {
3025                 pr_debug("unknown feature %d, continuing...\n", feat);
3026                 return 0;
3027         }
3028
3029         if (!feat_ops[feat].process)
3030                 return 0;
3031
3032         return feat_ops[feat].process(&fdd, data);
3033 }
3034
3035 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
3036                                        struct perf_header *ph, int fd,
3037                                        bool repipe)
3038 {
3039         struct feat_fd ff = {
3040                 .fd = STDOUT_FILENO,
3041                 .ph = ph,
3042         };
3043         ssize_t ret;
3044
3045         ret = readn(fd, header, sizeof(*header));
3046         if (ret <= 0)
3047                 return -1;
3048
3049         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3050                 pr_debug("endian/magic failed\n");
3051                 return -1;
3052         }
3053
3054         if (ph->needs_swap)
3055                 header->size = bswap_64(header->size);
3056
3057         if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
3058                 return -1;
3059
3060         return 0;
3061 }
3062
3063 static int perf_header__read_pipe(struct perf_session *session)
3064 {
3065         struct perf_header *header = &session->header;
3066         struct perf_pipe_file_header f_header;
3067
3068         if (perf_file_header__read_pipe(&f_header, header,
3069                                         perf_data__fd(session->data),
3070                                         session->repipe) < 0) {
3071                 pr_debug("incompatible file format\n");
3072                 return -EINVAL;
3073         }
3074
3075         return 0;
3076 }
3077
3078 static int read_attr(int fd, struct perf_header *ph,
3079                      struct perf_file_attr *f_attr)
3080 {
3081         struct perf_event_attr *attr = &f_attr->attr;
3082         size_t sz, left;
3083         size_t our_sz = sizeof(f_attr->attr);
3084         ssize_t ret;
3085
3086         memset(f_attr, 0, sizeof(*f_attr));
3087
3088         /* read minimal guaranteed structure */
3089         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3090         if (ret <= 0) {
3091                 pr_debug("cannot read %d bytes of header attr\n",
3092                          PERF_ATTR_SIZE_VER0);
3093                 return -1;
3094         }
3095
3096         /* on file perf_event_attr size */
3097         sz = attr->size;
3098
3099         if (ph->needs_swap)
3100                 sz = bswap_32(sz);
3101
3102         if (sz == 0) {
3103                 /* assume ABI0 */
3104                 sz =  PERF_ATTR_SIZE_VER0;
3105         } else if (sz > our_sz) {
3106                 pr_debug("file uses a more recent and unsupported ABI"
3107                          " (%zu bytes extra)\n", sz - our_sz);
3108                 return -1;
3109         }
3110         /* what we have not yet read and that we know about */
3111         left = sz - PERF_ATTR_SIZE_VER0;
3112         if (left) {
3113                 void *ptr = attr;
3114                 ptr += PERF_ATTR_SIZE_VER0;
3115
3116                 ret = readn(fd, ptr, left);
3117         }
3118         /* read perf_file_section, ids are read in caller */
3119         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3120
3121         return ret <= 0 ? -1 : 0;
3122 }
3123
3124 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
3125                                                 struct tep_handle *pevent)
3126 {
3127         struct tep_event *event;
3128         char bf[128];
3129
3130         /* already prepared */
3131         if (evsel->tp_format)
3132                 return 0;
3133
3134         if (pevent == NULL) {
3135                 pr_debug("broken or missing trace data\n");
3136                 return -1;
3137         }
3138
3139         event = tep_find_event(pevent, evsel->attr.config);
3140         if (event == NULL) {
3141                 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
3142                 return -1;
3143         }
3144
3145         if (!evsel->name) {
3146                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3147                 evsel->name = strdup(bf);
3148                 if (evsel->name == NULL)
3149                         return -1;
3150         }
3151
3152         evsel->tp_format = event;
3153         return 0;
3154 }
3155
3156 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
3157                                                   struct tep_handle *pevent)
3158 {
3159         struct perf_evsel *pos;
3160
3161         evlist__for_each_entry(evlist, pos) {
3162                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
3163                     perf_evsel__prepare_tracepoint_event(pos, pevent))
3164                         return -1;
3165         }
3166
3167         return 0;
3168 }
3169
/*
 * Top-level header reader: build session->evlist from the perf.data header,
 * reading the file header, every attribute with its sample ids, and all
 * feature sections, then bind tracepoint formats to the evsels.
 * For pipe input only the minimal pipe header is read.
 * Returns 0, -ENOMEM, -EINVAL, or -errno from a failed read.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember where the next attr starts before jumping to the ids */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* resume at the next attribute */
		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
3270
3271 int perf_event__synthesize_attr(struct perf_tool *tool,
3272                                 struct perf_event_attr *attr, u32 ids, u64 *id,
3273                                 perf_event__handler_t process)
3274 {
3275         union perf_event *ev;
3276         size_t size;
3277         int err;
3278
3279         size = sizeof(struct perf_event_attr);
3280         size = PERF_ALIGN(size, sizeof(u64));
3281         size += sizeof(struct perf_event_header);
3282         size += ids * sizeof(u64);
3283
3284         ev = malloc(size);
3285
3286         if (ev == NULL)
3287                 return -ENOMEM;
3288
3289         ev->attr.attr = *attr;
3290         memcpy(ev->attr.id, id, ids * sizeof(u64));
3291
3292         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
3293         ev->attr.header.size = (u16)size;
3294
3295         if (ev->attr.header.size == size)
3296                 err = process(tool, ev, NULL, NULL);
3297         else
3298                 err = -E2BIG;
3299
3300         free(ev);
3301
3302         return err;
3303 }
3304
/*
 * Emit one PERF_RECORD_HEADER_FEATURE record per feature bit set in the
 * session header, followed by a HEADER_LAST_FEATURE terminator record.
 * Each feature's payload is produced by its ->write handler into an
 * in-memory feat_fd buffer.  Returns 0 or the first @process error.
 */
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		/*
		 * NOTE(review): the guard checks ->synthesize but the call
		 * below uses ->write — presumably every feature with a
		 * ->synthesize flag also has a ->write handler; confirm
		 * against the feat_ops table.
		 */
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		/* leave room for the feature_event header before the payload */
		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id     = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}
3368
3369 int perf_event__process_feature(struct perf_session *session,
3370                                 union perf_event *event)
3371 {
3372         struct perf_tool *tool = session->tool;
3373         struct feat_fd ff = { .fd = 0 };
3374         struct feature_event *fe = (struct feature_event *)event;
3375         int type = fe->header.type;
3376         u64 feat = fe->feat_id;
3377
3378         if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3379                 pr_warning("invalid record type %d in pipe-mode\n", type);
3380                 return 0;
3381         }
3382         if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
3383                 pr_warning("invalid record type %d in pipe-mode\n", type);
3384                 return -1;
3385         }
3386
3387         if (!feat_ops[feat].process)
3388                 return 0;
3389
3390         ff.buf  = (void *)fe->data;
3391         ff.size = event->header.size - sizeof(event->header);
3392         ff.ph = &session->header;
3393
3394         if (feat_ops[feat].process(&ff, NULL))
3395                 return -1;
3396
3397         if (!feat_ops[feat].print || !tool->show_feat_hdr)
3398                 return 0;
3399
3400         if (!feat_ops[feat].full_only ||
3401             tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3402                 feat_ops[feat].print(&ff, stdout);
3403         } else {
3404                 fprintf(stdout, "# %s info available, use -I to display\n",
3405                         feat_ops[feat].name);
3406         }
3407
3408         return 0;
3409 }
3410
3411 static struct event_update_event *
3412 event_update_event__new(size_t size, u64 type, u64 id)
3413 {
3414         struct event_update_event *ev;
3415
3416         size += sizeof(*ev);
3417         size  = PERF_ALIGN(size, sizeof(u64));
3418
3419         ev = zalloc(size);
3420         if (ev) {
3421                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3422                 ev->header.size = (u16)size;
3423                 ev->type = type;
3424                 ev->id = id;
3425         }
3426         return ev;
3427 }
3428
3429 int
3430 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3431                                          struct perf_evsel *evsel,
3432                                          perf_event__handler_t process)
3433 {
3434         struct event_update_event *ev;
3435         size_t size = strlen(evsel->unit);
3436         int err;
3437
3438         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3439         if (ev == NULL)
3440                 return -ENOMEM;
3441
3442         strlcpy(ev->data, evsel->unit, size + 1);
3443         err = process(tool, (union perf_event *)ev, NULL, NULL);
3444         free(ev);
3445         return err;
3446 }
3447
3448 int
3449 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3450                                           struct perf_evsel *evsel,
3451                                           perf_event__handler_t process)
3452 {
3453         struct event_update_event *ev;
3454         struct event_update_event_scale *ev_data;
3455         int err;
3456
3457         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3458         if (ev == NULL)
3459                 return -ENOMEM;
3460
3461         ev_data = (struct event_update_event_scale *) ev->data;
3462         ev_data->scale = evsel->scale;
3463         err = process(tool, (union perf_event*) ev, NULL, NULL);
3464         free(ev);
3465         return err;
3466 }
3467
3468 int
3469 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3470                                          struct perf_evsel *evsel,
3471                                          perf_event__handler_t process)
3472 {
3473         struct event_update_event *ev;
3474         size_t len = strlen(evsel->name);
3475         int err;
3476
3477         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3478         if (ev == NULL)
3479                 return -ENOMEM;
3480
3481         strlcpy(ev->data, evsel->name, len + 1);
3482         err = process(tool, (union perf_event*) ev, NULL, NULL);
3483         free(ev);
3484         return err;
3485 }
3486
3487 int
3488 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3489                                         struct perf_evsel *evsel,
3490                                         perf_event__handler_t process)
3491 {
3492         size_t size = sizeof(struct event_update_event);
3493         struct event_update_event *ev;
3494         int max, err;
3495         u16 type;
3496
3497         if (!evsel->own_cpus)
3498                 return 0;
3499
3500         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3501         if (!ev)
3502                 return -ENOMEM;
3503
3504         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3505         ev->header.size = (u16)size;
3506         ev->type = PERF_EVENT_UPDATE__CPUS;
3507         ev->id   = evsel->id[0];
3508
3509         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3510                                  evsel->own_cpus,
3511                                  type, max);
3512
3513         err = process(tool, (union perf_event*) ev, NULL, NULL);
3514         free(ev);
3515         return err;
3516 }
3517
3518 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3519 {
3520         struct event_update_event *ev = &event->event_update;
3521         struct event_update_event_scale *ev_scale;
3522         struct event_update_event_cpus *ev_cpus;
3523         struct cpu_map *map;
3524         size_t ret;
3525
3526         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3527
3528         switch (ev->type) {
3529         case PERF_EVENT_UPDATE__SCALE:
3530                 ev_scale = (struct event_update_event_scale *) ev->data;
3531                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3532                 break;
3533         case PERF_EVENT_UPDATE__UNIT:
3534                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3535                 break;
3536         case PERF_EVENT_UPDATE__NAME:
3537                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3538                 break;
3539         case PERF_EVENT_UPDATE__CPUS:
3540                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3541                 ret += fprintf(fp, "... ");
3542
3543                 map = cpu_map__new_data(&ev_cpus->cpus);
3544                 if (map)
3545                         ret += cpu_map__fprintf(map, fp);
3546                 else
3547                         ret += fprintf(fp, "failed to get cpus\n");
3548                 break;
3549         default:
3550                 ret += fprintf(fp, "... unknown type\n");
3551                 break;
3552         }
3553
3554         return ret;
3555 }
3556
3557 int perf_event__synthesize_attrs(struct perf_tool *tool,
3558                                  struct perf_evlist *evlist,
3559                                  perf_event__handler_t process)
3560 {
3561         struct perf_evsel *evsel;
3562         int err = 0;
3563
3564         evlist__for_each_entry(evlist, evsel) {
3565                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3566                                                   evsel->id, process);
3567                 if (err) {
3568                         pr_debug("failed to create perf header attribute\n");
3569                         return err;
3570                 }
3571         }
3572
3573         return err;
3574 }
3575
3576 static bool has_unit(struct perf_evsel *counter)
3577 {
3578         return counter->unit && *counter->unit;
3579 }
3580
/* true when the counter's scale differs from the identity (1) */
static bool has_scale(struct perf_evsel *counter)
{
	return counter->scale != 1;
}
3585
3586 int perf_event__synthesize_extra_attr(struct perf_tool *tool,
3587                                       struct perf_evlist *evsel_list,
3588                                       perf_event__handler_t process,
3589                                       bool is_pipe)
3590 {
3591         struct perf_evsel *counter;
3592         int err;
3593
3594         /*
3595          * Synthesize other events stuff not carried within
3596          * attr event - unit, scale, name
3597          */
3598         evlist__for_each_entry(evsel_list, counter) {
3599                 if (!counter->supported)
3600                         continue;
3601
3602                 /*
3603                  * Synthesize unit and scale only if it's defined.
3604                  */
3605                 if (has_unit(counter)) {
3606                         err = perf_event__synthesize_event_update_unit(tool, counter, process);
3607                         if (err < 0) {
3608                                 pr_err("Couldn't synthesize evsel unit.\n");
3609                                 return err;
3610                         }
3611                 }
3612
3613                 if (has_scale(counter)) {
3614                         err = perf_event__synthesize_event_update_scale(tool, counter, process);
3615                         if (err < 0) {
3616                                 pr_err("Couldn't synthesize evsel counter.\n");
3617                                 return err;
3618                         }
3619                 }
3620
3621                 if (counter->own_cpus) {
3622                         err = perf_event__synthesize_event_update_cpus(tool, counter, process);
3623                         if (err < 0) {
3624                                 pr_err("Couldn't synthesize evsel cpus.\n");
3625                                 return err;
3626                         }
3627                 }
3628
3629                 /*
3630                  * Name is needed only for pipe output,
3631                  * perf.data carries event names.
3632                  */
3633                 if (is_pipe) {
3634                         err = perf_event__synthesize_event_update_name(tool, counter, process);
3635                         if (err < 0) {
3636                                 pr_err("Couldn't synthesize evsel name.\n");
3637                                 return err;
3638                         }
3639                 }
3640         }
3641         return 0;
3642 }
3643
3644 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3645                              union perf_event *event,
3646                              struct perf_evlist **pevlist)
3647 {
3648         u32 i, ids, n_ids;
3649         struct perf_evsel *evsel;
3650         struct perf_evlist *evlist = *pevlist;
3651
3652         if (evlist == NULL) {
3653                 *pevlist = evlist = perf_evlist__new();
3654                 if (evlist == NULL)
3655                         return -ENOMEM;
3656         }
3657
3658         evsel = perf_evsel__new(&event->attr.attr);
3659         if (evsel == NULL)
3660                 return -ENOMEM;
3661
3662         perf_evlist__add(evlist, evsel);
3663
3664         ids = event->header.size;
3665         ids -= (void *)&event->attr.id - (void *)event;
3666         n_ids = ids / sizeof(u64);
3667         /*
3668          * We don't have the cpu and thread maps on the header, so
3669          * for allocating the perf_sample_id table we fake 1 cpu and
3670          * hattr->ids threads.
3671          */
3672         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3673                 return -ENOMEM;
3674
3675         for (i = 0; i < n_ids; i++) {
3676                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3677         }
3678
3679         return 0;
3680 }
3681
3682 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3683                                      union perf_event *event,
3684                                      struct perf_evlist **pevlist)
3685 {
3686         struct event_update_event *ev = &event->event_update;
3687         struct event_update_event_scale *ev_scale;
3688         struct event_update_event_cpus *ev_cpus;
3689         struct perf_evlist *evlist;
3690         struct perf_evsel *evsel;
3691         struct cpu_map *map;
3692
3693         if (!pevlist || *pevlist == NULL)
3694                 return -EINVAL;
3695
3696         evlist = *pevlist;
3697
3698         evsel = perf_evlist__id2evsel(evlist, ev->id);
3699         if (evsel == NULL)
3700                 return -EINVAL;
3701
3702         switch (ev->type) {
3703         case PERF_EVENT_UPDATE__UNIT:
3704                 evsel->unit = strdup(ev->data);
3705                 break;
3706         case PERF_EVENT_UPDATE__NAME:
3707                 evsel->name = strdup(ev->data);
3708                 break;
3709         case PERF_EVENT_UPDATE__SCALE:
3710                 ev_scale = (struct event_update_event_scale *) ev->data;
3711                 evsel->scale = ev_scale->scale;
3712                 break;
3713         case PERF_EVENT_UPDATE__CPUS:
3714                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3715
3716                 map = cpu_map__new_data(&ev_cpus->cpus);
3717                 if (map)
3718                         evsel->own_cpus = map;
3719                 else
3720                         pr_err("failed to get event_update cpus\n");
3721         default:
3722                 break;
3723         }
3724
3725         return 0;
3726 }
3727
/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA record followed by the tracing
 * data itself (u64-padded) into pipe @fd.  Returns the aligned payload
 * size on success, -1 on failure.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	/* payload is padded up to a u64 boundary on the wire */
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	/* NOTE(review): the process() result is ignored here — confirm intentional */
	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	/* pad the stream so the next record starts u64-aligned */
	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
3776
3777 int perf_event__process_tracing_data(struct perf_session *session,
3778                                      union perf_event *event)
3779 {
3780         ssize_t size_read, padding, size = event->tracing_data.size;
3781         int fd = perf_data__fd(session->data);
3782         off_t offset = lseek(fd, 0, SEEK_CUR);
3783         char buf[BUFSIZ];
3784
3785         /* setup for reading amidst mmap */
3786         lseek(fd, offset + sizeof(struct tracing_data_event),
3787               SEEK_SET);
3788
3789         size_read = trace_report(fd, &session->tevent,
3790                                  session->repipe);
3791         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3792
3793         if (readn(fd, buf, padding) < 0) {
3794                 pr_err("%s: reading input file", __func__);
3795                 return -1;
3796         }
3797         if (session->repipe) {
3798                 int retw = write(STDOUT_FILENO, buf, padding);
3799                 if (retw <= 0 || retw != padding) {
3800                         pr_err("%s: repiping tracing data padding", __func__);
3801                         return -1;
3802                 }
3803         }
3804
3805         if (size_read + padding != size) {
3806                 pr_err("%s: tracing data size mismatch", __func__);
3807                 return -1;
3808         }
3809
3810         perf_evlist__prepare_tracepoint_events(session->evlist,
3811                                                session->tevent.pevent);
3812
3813         return size_read + padding;
3814 }
3815
3816 int perf_event__synthesize_build_id(struct perf_tool *tool,
3817                                     struct dso *pos, u16 misc,
3818                                     perf_event__handler_t process,
3819                                     struct machine *machine)
3820 {
3821         union perf_event ev;
3822         size_t len;
3823         int err = 0;
3824
3825         if (!pos->hit)
3826                 return err;
3827
3828         memset(&ev, 0, sizeof(ev));
3829
3830         len = pos->long_name_len + 1;
3831         len = PERF_ALIGN(len, NAME_ALIGN);
3832         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3833         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3834         ev.build_id.header.misc = misc;
3835         ev.build_id.pid = machine->pid;
3836         ev.build_id.header.size = sizeof(ev.build_id) + len;
3837         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3838
3839         err = process(tool, &ev, NULL, machine);
3840
3841         return err;
3842 }
3843
3844 int perf_event__process_build_id(struct perf_session *session,
3845                                  union perf_event *event)
3846 {
3847         __event_process_build_id(&event->build_id,
3848                                  event->build_id.filename,
3849                                  session);
3850         return 0;
3851 }