1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/compiler.h>
13 #include <linux/list.h>
14 #include <linux/kernel.h>
15 #include <linux/bitops.h>
16 #include <linux/stringify.h>
18 #include <sys/utsname.h>
19 #include <linux/time64.h>
27 #include "trace-event.h"
37 #include <api/fs/fs.h>
40 #include "time-utils.h"
43 #include "sane_ctype.h"
47 * must be a numerical value to let the endianness
48 * determine the memory layout. That way we are able
49 * to detect endianness when reading the perf.data file back.
52 * we check for legacy (PERFFILE) format.
54 static const char *__perf_magic1 = "PERFFILE";
55 static const u64 __perf_magic2 = 0x32454c4946524550ULL;
56 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
58 #define PERF_MAGIC __perf_magic2
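/*
 * Editorial note (illustrative, not part of the original source):
 * __perf_magic2 is the ASCII string "PERFILE2" interpreted as a
 * little-endian u64, and __perf_magic2_sw is the same string with the
 * opposite byte order.  A reader that finds __perf_magic2_sw in the header
 * therefore knows the file was written on a host of the opposite
 * endianness and sets needs_swap (see check_magic_endian() below).
 */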
60 const char perf_version_string[] = PERF_VERSION;
62 struct perf_file_attr {
63 struct perf_event_attr attr;
64 struct perf_file_section ids;
68 struct perf_header *ph;
70 void *buf; /* Either buf != NULL or fd >= 0 */
73 struct perf_evsel *events;
76 void perf_header__set_feat(struct perf_header *header, int feat)
78 set_bit(feat, header->adds_features);
81 void perf_header__clear_feat(struct perf_header *header, int feat)
83 clear_bit(feat, header->adds_features);
86 bool perf_header__has_feat(const struct perf_header *header, int feat)
88 return test_bit(feat, header->adds_features);
91 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
93 ssize_t ret = writen(ff->fd, buf, size);
95 if (ret != (ssize_t)size)
96 return ret < 0 ? (int)ret : -1;
100 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
102 /* struct perf_event_header::size is u16 */
103 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
104 size_t new_size = ff->size;
107 if (size + ff->offset > max_size)
110 while (size > (new_size - ff->offset)) new_size <<= 1;
112 new_size = min(max_size, new_size);
114 if (ff->size < new_size) {
115 addr = realloc(ff->buf, new_size);
122 memcpy(ff->buf + ff->offset, buf, size);
128 /* Return: 0 if succeeded, -ERR if failed. */
129 int do_write(struct feat_fd *ff, const void *buf, size_t size)
132 return __do_write_fd(ff, buf, size);
133 return __do_write_buf(ff, buf, size);
136 /* Return: 0 if succeeded, -ERR if failed. */
137 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
139 u64 *p = (u64 *) set;
142 ret = do_write(ff, &size, sizeof(size));
146 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
147 ret = do_write(ff, p + i, sizeof(*p));
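/*
 * Sketch of the resulting on-file layout, derived from the code above
 * (illustrative): a u64 giving the bitmap size in bits, followed by
 * BITS_TO_U64(size) u64 words holding the bitmap itself.  do_read_bitmap()
 * below reads it back in the same order, swapping each word if needed.
 */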
155 /* Return: 0 if succeeded, -ERR if failed. */
156 int write_padded(struct feat_fd *ff, const void *bf,
157 size_t count, size_t count_aligned)
159 static const char zero_buf[NAME_ALIGN];
160 int err = do_write(ff, bf, count);
163 err = do_write(ff, zero_buf, count_aligned - count);
168 #define string_size(str) \
169 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
171 /* Return: 0 if succeeded, -ERR if failed. */
172 static int do_write_string(struct feat_fd *ff, const char *str)
177 olen = strlen(str) + 1;
178 len = PERF_ALIGN(olen, NAME_ALIGN);
180 /* write len, incl. \0 */
181 ret = do_write(ff, &len, sizeof(len));
185 return write_padded(ff, str, olen, len);
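/*
 * Worked example (illustrative, assuming NAME_ALIGN is 64 as defined in
 * header.h): do_write_string(ff, "perf") writes the u32 length 64
 * (strlen + 1 rounded up to NAME_ALIGN), then "perf\0" padded with 59 zero
 * bytes, so string_size("perf") == sizeof(u32) + 64 == 68 bytes on file.
 */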
188 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
190 ssize_t ret = readn(ff->fd, addr, size);
193 return ret < 0 ? (int)ret : -1;
197 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
199 if (size > (ssize_t)ff->size - ff->offset)
202 memcpy(addr, ff->buf + ff->offset, size);
209 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
212 return __do_read_fd(ff, addr, size);
213 return __do_read_buf(ff, addr, size);
216 static int do_read_u32(struct feat_fd *ff, u32 *addr)
220 ret = __do_read(ff, addr, sizeof(*addr));
224 if (ff->ph->needs_swap)
225 *addr = bswap_32(*addr);
229 static int do_read_u64(struct feat_fd *ff, u64 *addr)
233 ret = __do_read(ff, addr, sizeof(*addr));
237 if (ff->ph->needs_swap)
238 *addr = bswap_64(*addr);
242 static char *do_read_string(struct feat_fd *ff)
247 if (do_read_u32(ff, &len))
254 if (!__do_read(ff, buf, len)) {
256 * strings are padded by zeroes
257 * thus the actual strlen of buf
258 * may be less than len
267 /* Return: 0 if succeeded, -ERR if failed. */
268 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
274 ret = do_read_u64(ff, &size);
278 set = bitmap_alloc(size);
284 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
285 ret = do_read_u64(ff, p + i);
297 static int write_tracing_data(struct feat_fd *ff,
298 struct perf_evlist *evlist)
300 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
303 return read_tracing_data(ff->fd, &evlist->entries);
306 static int write_build_id(struct feat_fd *ff,
307 struct perf_evlist *evlist __maybe_unused)
309 struct perf_session *session;
312 session = container_of(ff->ph, struct perf_session, header);
314 if (!perf_session__read_build_ids(session, true))
317 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
320 err = perf_session__write_buildid_table(session, ff);
322 pr_debug("failed to write buildid table\n");
325 perf_session__cache_build_ids(session);
330 static int write_hostname(struct feat_fd *ff,
331 struct perf_evlist *evlist __maybe_unused)
340 return do_write_string(ff, uts.nodename);
343 static int write_osrelease(struct feat_fd *ff,
344 struct perf_evlist *evlist __maybe_unused)
353 return do_write_string(ff, uts.release);
356 static int write_arch(struct feat_fd *ff,
357 struct perf_evlist *evlist __maybe_unused)
366 return do_write_string(ff, uts.machine);
369 static int write_version(struct feat_fd *ff,
370 struct perf_evlist *evlist __maybe_unused)
372 return do_write_string(ff, perf_version_string);
375 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
380 const char *search = cpuinfo_proc;
387 file = fopen("/proc/cpuinfo", "r");
391 while (getline(&buf, &len, file) > 0) {
392 ret = strncmp(buf, search, strlen(search));
404 p = strchr(buf, ':');
405 if (p && *(p+1) == ' ' && *(p+2))
411 /* squash extra space characters (branding string) */
418 while (*q && isspace(*q))
421 while ((*r++ = *q++));
425 ret = do_write_string(ff, s);
432 static int write_cpudesc(struct feat_fd *ff,
433 struct perf_evlist *evlist __maybe_unused)
435 const char *cpuinfo_procs[] = CPUINFO_PROC;
438 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
440 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
448 static int write_nrcpus(struct feat_fd *ff,
449 struct perf_evlist *evlist __maybe_unused)
455 nrc = cpu__max_present_cpu();
457 nr = sysconf(_SC_NPROCESSORS_ONLN);
461 nra = (u32)(nr & UINT_MAX);
463 ret = do_write(ff, &nrc, sizeof(nrc));
467 return do_write(ff, &nra, sizeof(nra));
470 static int write_event_desc(struct feat_fd *ff,
471 struct perf_evlist *evlist)
473 struct perf_evsel *evsel;
477 nre = evlist->nr_entries;
480 * write number of events
482 ret = do_write(ff, &nre, sizeof(nre));
487 * size of perf_event_attr struct
489 sz = (u32)sizeof(evsel->attr);
490 ret = do_write(ff, &sz, sizeof(sz));
494 evlist__for_each_entry(evlist, evsel) {
495 ret = do_write(ff, &evsel->attr, sz);
499 * write number of unique id per event
500 * there is one id per instance of an event
502 * copy into an nri to be independent of the type of ids
506 ret = do_write(ff, &nri, sizeof(nri));
511 * write event string as passed on cmdline
513 ret = do_write_string(ff, perf_evsel__name(evsel));
517 * write unique ids for this event
519 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
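/*
 * Sketch of the EVENT_DESC on-file layout produced above (illustrative):
 *   u32 nre                    number of events
 *   u32 sz                     sizeof(struct perf_event_attr) at write time
 *   then, per event:
 *     struct perf_event_attr   sz bytes
 *     u32 nri                  number of unique ids for the event
 *     string                   event name (do_write_string() format)
 *     u64 ids[nri]             the unique sample ids
 * read_event_desc() below parses this layout back.
 */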
526 static int write_cmdline(struct feat_fd *ff,
527 struct perf_evlist *evlist __maybe_unused)
529 char buf[MAXPATHLEN];
533 /* actual path to perf binary */
534 ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
538 /* readlink() does not add null termination */
541 /* account for binary path */
542 n = perf_env.nr_cmdline + 1;
544 ret = do_write(ff, &n, sizeof(n));
548 ret = do_write_string(ff, buf);
552 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
553 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
560 #define CORE_SIB_FMT \
561 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
562 #define THRD_SIB_FMT \
563 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
569 char **core_siblings;
570 char **thread_siblings;
573 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
576 char filename[MAXPATHLEN];
577 char *buf = NULL, *p;
583 sprintf(filename, CORE_SIB_FMT, cpu);
584 fp = fopen(filename, "r");
588 sret = getline(&buf, &len, fp);
593 p = strchr(buf, '\n');
597 for (i = 0; i < tp->core_sib; i++) {
598 if (!strcmp(buf, tp->core_siblings[i]))
601 if (i == tp->core_sib) {
602 tp->core_siblings[i] = buf;
610 sprintf(filename, THRD_SIB_FMT, cpu);
611 fp = fopen(filename, "r");
615 if (getline(&buf, &len, fp) <= 0)
618 p = strchr(buf, '\n');
622 for (i = 0; i < tp->thread_sib; i++) {
623 if (!strcmp(buf, tp->thread_siblings[i]))
626 if (i == tp->thread_sib) {
627 tp->thread_siblings[i] = buf;
639 static void free_cpu_topo(struct cpu_topo *tp)
646 for (i = 0 ; i < tp->core_sib; i++)
647 zfree(&tp->core_siblings[i]);
649 for (i = 0 ; i < tp->thread_sib; i++)
650 zfree(&tp->thread_siblings[i]);
655 static struct cpu_topo *build_cpu_topology(void)
657 struct cpu_topo *tp = NULL;
665 ncpus = cpu__max_present_cpu();
667 /* build online CPU map */
668 map = cpu_map__new(NULL);
670 pr_debug("failed to get system cpumap\n");
674 nr = (u32)(ncpus & UINT_MAX);
676 sz = nr * sizeof(char *);
677 addr = calloc(1, sizeof(*tp) + 2 * sz);
684 tp->core_siblings = addr;
686 tp->thread_siblings = addr;
688 for (i = 0; i < nr; i++) {
689 if (!cpu_map__has(map, i))
692 ret = build_cpu_topo(tp, i);
706 static int write_cpu_topology(struct feat_fd *ff,
707 struct perf_evlist *evlist __maybe_unused)
713 tp = build_cpu_topology();
717 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
721 for (i = 0; i < tp->core_sib; i++) {
722 ret = do_write_string(ff, tp->core_siblings[i]);
726 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
730 for (i = 0; i < tp->thread_sib; i++) {
731 ret = do_write_string(ff, tp->thread_siblings[i]);
736 ret = perf_env__read_cpu_topology_map(&perf_env);
740 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
741 ret = do_write(ff, &perf_env.cpu[j].core_id,
742 sizeof(perf_env.cpu[j].core_id));
745 ret = do_write(ff, &perf_env.cpu[j].socket_id,
746 sizeof(perf_env.cpu[j].socket_id));
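/*
 * Sketch of the CPU_TOPOLOGY section written above (illustrative):
 *   u32 core_sib, then core_sib sibling-core list strings
 *   u32 thread_sib, then thread_sib sibling-thread list strings
 *   then, per available CPU: u32 core_id, u32 socket_id
 * process_cpu_topology() reads this back and tolerates old files that end
 * after the sibling lists.
 */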
757 static int write_total_mem(struct feat_fd *ff,
758 struct perf_evlist *evlist __maybe_unused)
766 fp = fopen("/proc/meminfo", "r");
770 while (getline(&buf, &len, fp) > 0) {
771 ret = strncmp(buf, "MemTotal:", 9);
776 n = sscanf(buf, "%*s %"PRIu64, &mem);
778 ret = do_write(ff, &mem, sizeof(mem));
786 static int write_topo_node(struct feat_fd *ff, int node)
788 char str[MAXPATHLEN];
790 char *buf = NULL, *p;
793 u64 mem_total, mem_free, mem;
796 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
797 fp = fopen(str, "r");
801 while (getline(&buf, &len, fp) > 0) {
802 /* skip over invalid lines */
803 if (!strchr(buf, ':'))
805 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
807 if (!strcmp(field, "MemTotal:"))
809 if (!strcmp(field, "MemFree:"))
816 ret = do_write(ff, &mem_total, sizeof(u64));
820 ret = do_write(ff, &mem_free, sizeof(u64));
825 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
827 fp = fopen(str, "r");
831 if (getline(&buf, &len, fp) <= 0)
834 p = strchr(buf, '\n');
838 ret = do_write_string(ff, buf);
846 static int write_numa_topology(struct feat_fd *ff,
847 struct perf_evlist *evlist __maybe_unused)
852 struct cpu_map *node_map = NULL;
857 fp = fopen("/sys/devices/system/node/online", "r");
861 if (getline(&buf, &len, fp) <= 0)
864 c = strchr(buf, '\n');
868 node_map = cpu_map__new(buf);
872 nr = (u32)node_map->nr;
874 ret = do_write(ff, &nr, sizeof(nr));
878 for (i = 0; i < nr; i++) {
879 j = (u32)node_map->map[i];
880 ret = do_write(ff, &j, sizeof(j));
884 ret = write_topo_node(ff, i);
891 cpu_map__put(node_map);
898 * struct pmu_mappings {
907 static int write_pmu_mappings(struct feat_fd *ff,
908 struct perf_evlist *evlist __maybe_unused)
910 struct perf_pmu *pmu = NULL;
915 * Do a first pass to count the number of PMUs, to avoid lseek() so this
916 * works in pipe mode as well.
918 while ((pmu = perf_pmu__scan(pmu))) {
924 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
928 while ((pmu = perf_pmu__scan(pmu))) {
932 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
936 ret = do_write_string(ff, pmu->name);
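/*
 * Sketch of the PMU_MAPPINGS section written above (illustrative):
 *   u32 pmu_num, then per PMU: u32 type, string name
 * process_pmu_mappings() below stores these as "type:name" pairs in
 * env.pmu_mappings.
 */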
947 * struct group_descs {
949 * struct group_desc {
956 static int write_group_desc(struct feat_fd *ff,
957 struct perf_evlist *evlist)
959 u32 nr_groups = evlist->nr_groups;
960 struct perf_evsel *evsel;
963 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
967 evlist__for_each_entry(evlist, evsel) {
968 if (perf_evsel__is_group_leader(evsel) &&
969 evsel->nr_members > 1) {
970 const char *name = evsel->group_name ?: "{anon_group}";
971 u32 leader_idx = evsel->idx;
972 u32 nr_members = evsel->nr_members;
974 ret = do_write_string(ff, name);
978 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
982 ret = do_write(ff, &nr_members, sizeof(nr_members));
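/*
 * Sketch of the GROUP_DESC section written above (illustrative):
 *   u32 nr_groups, then per group: string name ("{anon_group}" if unnamed),
 *   u32 leader_idx, u32 nr_members
 * process_group_desc() uses leader_idx and nr_members to rebuild the
 * evsel leader/member relationships on the read side.
 */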
991 * Return the CPU id as a raw string.
993 * Each architecture should provide a more precise id string that
994 * can be used to match the architecture's "mapfile".
996 char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
1001 /* Return zero when the cpuid from the mapfile.csv matches the
1002 * cpuid string generated on this platform.
1003 * Otherwise return non-zero.
1005 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
1008 regmatch_t pmatch[1];
1011 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
1012 /* Warn that the mapfile cpuid is not a valid regular expression. */
1013 pr_info("Invalid regular expression %s\n", mapcpuid);
1017 match = !regexec(&re, cpuid, 1, pmatch, 0);
1020 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
1022 /* Verify the entire string matched. */
1023 if (match_len == strlen(cpuid))
1030 * default get_cpuid(): nothing gets recorded;
1031 * the actual implementation must be in arch/$(SRCARCH)/util/header.c
1033 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
1038 static int write_cpuid(struct feat_fd *ff,
1039 struct perf_evlist *evlist __maybe_unused)
1044 ret = get_cpuid(buffer, sizeof(buffer));
1050 return do_write_string(ff, buffer);
1053 static int write_branch_stack(struct feat_fd *ff __maybe_unused,
1054 struct perf_evlist *evlist __maybe_unused)
1059 static int write_auxtrace(struct feat_fd *ff,
1060 struct perf_evlist *evlist __maybe_unused)
1062 struct perf_session *session;
1065 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
1068 session = container_of(ff->ph, struct perf_session, header);
1070 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
1072 pr_err("Failed to write auxtrace index\n");
1076 static int write_clockid(struct feat_fd *ff,
1077 struct perf_evlist *evlist __maybe_unused)
1079 return do_write(ff, &ff->ph->env.clockid_res_ns,
1080 sizeof(ff->ph->env.clockid_res_ns));
1083 static int cpu_cache_level__sort(const void *a, const void *b)
1085 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
1086 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
1088 return cache_a->level - cache_b->level;
1091 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
1093 if (a->level != b->level)
1096 if (a->line_size != b->line_size)
1099 if (a->sets != b->sets)
1102 if (a->ways != b->ways)
1105 if (strcmp(a->type, b->type))
1108 if (strcmp(a->size, b->size))
1111 if (strcmp(a->map, b->map))
1117 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1119 char path[PATH_MAX], file[PATH_MAX];
1123 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1124 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1126 if (stat(file, &st))
1129 scnprintf(file, PATH_MAX, "%s/level", path);
1130 if (sysfs__read_int(file, (int *) &cache->level))
1133 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1134 if (sysfs__read_int(file, (int *) &cache->line_size))
1137 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1138 if (sysfs__read_int(file, (int *) &cache->sets))
1141 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1142 if (sysfs__read_int(file, (int *) &cache->ways))
1145 scnprintf(file, PATH_MAX, "%s/type", path);
1146 if (sysfs__read_str(file, &cache->type, &len))
1149 cache->type[len] = 0;
1150 cache->type = rtrim(cache->type);
1152 scnprintf(file, PATH_MAX, "%s/size", path);
1153 if (sysfs__read_str(file, &cache->size, &len)) {
1158 cache->size[len] = 0;
1159 cache->size = rtrim(cache->size);
1161 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1162 if (sysfs__read_str(file, &cache->map, &len)) {
1168 cache->map[len] = 0;
1169 cache->map = rtrim(cache->map);
1173 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1175 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
1178 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
1185 ncpus = sysconf(_SC_NPROCESSORS_CONF);
1189 nr = (u32)(ncpus & UINT_MAX);
1191 for (cpu = 0; cpu < nr; cpu++) {
1192 for (level = 0; level < 10; level++) {
1193 struct cpu_cache_level c;
1196 err = cpu_cache_level__read(&c, cpu, level);
1203 for (i = 0; i < cnt; i++) {
1204 if (cpu_cache_level__cmp(&c, &caches[i]))
1211 cpu_cache_level__free(&c);
1213 if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1222 #define MAX_CACHES 2000
1224 static int write_cache(struct feat_fd *ff,
1225 struct perf_evlist *evlist __maybe_unused)
1227 struct cpu_cache_level caches[MAX_CACHES];
1228 u32 cnt = 0, i, version = 1;
1231 ret = build_caches(caches, MAX_CACHES, &cnt);
1235 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1237 ret = do_write(ff, &version, sizeof(u32));
1241 ret = do_write(ff, &cnt, sizeof(u32));
1245 for (i = 0; i < cnt; i++) {
1246 struct cpu_cache_level *c = &caches[i];
1249 ret = do_write(ff, &c->v, sizeof(u32)); \
1260 ret = do_write_string(ff, (const char *) c->v); \
1271 for (i = 0; i < cnt; i++)
1272 cpu_cache_level__free(&caches[i]);
1276 static int write_stat(struct feat_fd *ff __maybe_unused,
1277 struct perf_evlist *evlist __maybe_unused)
1282 static int write_sample_time(struct feat_fd *ff,
1283 struct perf_evlist *evlist)
1287 ret = do_write(ff, &evlist->first_sample_time,
1288 sizeof(evlist->first_sample_time));
1292 return do_write(ff, &evlist->last_sample_time,
1293 sizeof(evlist->last_sample_time));
1297 static int memory_node__read(struct memory_node *n, unsigned long idx)
1299 unsigned int phys, size = 0;
1300 char path[PATH_MAX];
1304 #define for_each_memory(mem, dir) \
1305 while ((ent = readdir(dir))) \
1306 if (strcmp(ent->d_name, ".") && \
1307 strcmp(ent->d_name, "..") && \
1308 sscanf(ent->d_name, "memory%u", &mem) == 1)
1310 scnprintf(path, PATH_MAX,
1311 "%s/devices/system/node/node%lu",
1312 sysfs__mountpoint(), idx);
1314 dir = opendir(path);
1316 pr_warning("failed: can't open memory sysfs data\n");
1320 for_each_memory(phys, dir) {
1321 size = max(phys, size);
1326 n->set = bitmap_alloc(size);
1337 for_each_memory(phys, dir) {
1338 set_bit(phys, n->set);
1345 static int memory_node__sort(const void *a, const void *b)
1347 const struct memory_node *na = a;
1348 const struct memory_node *nb = b;
1350 return na->node - nb->node;
1353 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1355 char path[PATH_MAX];
1361 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1362 sysfs__mountpoint());
1364 dir = opendir(path);
1366 pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
1371 while (!ret && (ent = readdir(dir))) {
1375 if (!strcmp(ent->d_name, ".") ||
1376 !strcmp(ent->d_name, ".."))
1379 r = sscanf(ent->d_name, "node%u", &idx);
1383 if (WARN_ONCE(cnt >= size,
1384 "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1387 ret = memory_node__read(&nodes[cnt++], idx);
1394 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1399 #define MAX_MEMORY_NODES 2000
1402 * The MEM_TOPOLOGY holds the physical memory map for every
1403 * node in the system. The format of the data is as follows:
1405 * 0 - version | for future changes
1406 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1407 * 16 - count | number of nodes
1409 * For each node we store a map of physical memory indexes belonging to that node:
1412 * 32 - node id | node index
1413 * 40 - size | size of the following bitmap
1414 * 48 - bitmap | bitmap of memory indexes that belong to the node
1416 static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1417 struct perf_evlist *evlist __maybe_unused)
1419 static struct memory_node nodes[MAX_MEMORY_NODES];
1420 u64 bsize, version = 1, i, nr;
1423 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1424 (unsigned long long *) &bsize);
1428 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1432 ret = do_write(ff, &version, sizeof(version));
1436 ret = do_write(ff, &bsize, sizeof(bsize));
1440 ret = do_write(ff, &nr, sizeof(nr));
1444 for (i = 0; i < nr; i++) {
1445 struct memory_node *n = &nodes[i];
1448 ret = do_write(ff, &n->v, sizeof(n->v)); \
1457 ret = do_write_bitmap(ff, n->set, n->size);
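/*
 * Worked example (illustrative): for a node0 that exposes
 * /sys/devices/system/node/node0/memory0 ... memory3 with a block size of
 * 0x8000000 (128 MB), the entry above is written as node id 0, bitmap size
 * 4 bits and bitmap 0xf, i.e. four present blocks describing 512 MB.
 */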
1466 static void print_hostname(struct feat_fd *ff, FILE *fp)
1468 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1471 static void print_osrelease(struct feat_fd *ff, FILE *fp)
1473 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1476 static void print_arch(struct feat_fd *ff, FILE *fp)
1478 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1481 static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1483 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1486 static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1488 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1489 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1492 static void print_version(struct feat_fd *ff, FILE *fp)
1494 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1497 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1501 nr = ff->ph->env.nr_cmdline;
1503 fprintf(fp, "# cmdline : ");
1505 for (i = 0; i < nr; i++) {
1506 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1508 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1512 char *quote = strchr(argv_i, '\'');
1516 fprintf(fp, "%s\\\'", argv_i);
1519 fprintf(fp, "%s ", argv_i);
1526 static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1528 struct perf_header *ph = ff->ph;
1529 int cpu_nr = ph->env.nr_cpus_avail;
1533 nr = ph->env.nr_sibling_cores;
1534 str = ph->env.sibling_cores;
1536 for (i = 0; i < nr; i++) {
1537 fprintf(fp, "# sibling cores : %s\n", str);
1538 str += strlen(str) + 1;
1541 nr = ph->env.nr_sibling_threads;
1542 str = ph->env.sibling_threads;
1544 for (i = 0; i < nr; i++) {
1545 fprintf(fp, "# sibling threads : %s\n", str);
1546 str += strlen(str) + 1;
1549 if (ph->env.cpu != NULL) {
1550 for (i = 0; i < cpu_nr; i++)
1551 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1552 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1554 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1557 static void print_clockid(struct feat_fd *ff, FILE *fp)
1559 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1560 ff->ph->env.clockid_res_ns * 1000);
1563 static void free_event_desc(struct perf_evsel *events)
1565 struct perf_evsel *evsel;
1570 for (evsel = events; evsel->attr.size; evsel++) {
1571 zfree(&evsel->name);
1578 static struct perf_evsel *read_event_desc(struct feat_fd *ff)
1580 struct perf_evsel *evsel, *events = NULL;
1583 u32 nre, sz, nr, i, j;
1586 /* number of events */
1587 if (do_read_u32(ff, &nre))
1590 if (do_read_u32(ff, &sz))
1593 /* buffer to hold on file attr struct */
1598 /* the last event terminates with evsel->attr.size == 0: */
1599 events = calloc(nre + 1, sizeof(*events));
1603 msz = sizeof(evsel->attr);
1607 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1611 * must read entire on-file attr struct to
1612 * sync up with layout.
1614 if (__do_read(ff, buf, sz))
1617 if (ff->ph->needs_swap)
1618 perf_event__attr_swap(buf);
1620 memcpy(&evsel->attr, buf, msz);
1622 if (do_read_u32(ff, &nr))
1625 if (ff->ph->needs_swap)
1626 evsel->needs_swap = true;
1628 evsel->name = do_read_string(ff);
1635 id = calloc(nr, sizeof(*id));
1641 for (j = 0 ; j < nr; j++) {
1642 if (do_read_u64(ff, id))
1651 free_event_desc(events);
1656 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1657 void *priv __maybe_unused)
1659 return fprintf(fp, ", %s = %s", name, val);
1662 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1664 struct perf_evsel *evsel, *events;
1669 events = ff->events;
1671 events = read_event_desc(ff);
1674 fprintf(fp, "# event desc: not available or unable to read\n");
1678 for (evsel = events; evsel->attr.size; evsel++) {
1679 fprintf(fp, "# event : name = %s, ", evsel->name);
1682 fprintf(fp, ", id = {");
1683 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1686 fprintf(fp, " %"PRIu64, *id);
1691 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1696 free_event_desc(events);
1700 static void print_total_mem(struct feat_fd *ff, FILE *fp)
1702 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
1705 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1708 struct numa_node *n;
1710 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1711 n = &ff->ph->env.numa_nodes[i];
1713 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1714 " free = %"PRIu64" kB\n",
1715 n->node, n->mem_total, n->mem_free);
1717 fprintf(fp, "# node%u cpu list : ", n->node);
1718 cpu_map__fprintf(n->map, fp);
1722 static void print_cpuid(struct feat_fd *ff, FILE *fp)
1724 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1727 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1729 fprintf(fp, "# contains samples with branch stack\n");
1732 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1734 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1737 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1739 fprintf(fp, "# contains stat data\n");
1742 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1746 fprintf(fp, "# CPU cache info:\n");
1747 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1749 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1753 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
1755 const char *delimiter = "# pmu mappings: ";
1760 pmu_num = ff->ph->env.nr_pmu_mappings;
1762 fprintf(fp, "# pmu mappings: not available\n");
1766 str = ff->ph->env.pmu_mappings;
1769 type = strtoul(str, &tmp, 0);
1774 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1777 str += strlen(str) + 1;
1786 fprintf(fp, "# pmu mappings: unable to read\n");
1789 static void print_group_desc(struct feat_fd *ff, FILE *fp)
1791 struct perf_session *session;
1792 struct perf_evsel *evsel;
1795 session = container_of(ff->ph, struct perf_session, header);
1797 evlist__for_each_entry(session->evlist, evsel) {
1798 if (perf_evsel__is_group_leader(evsel) &&
1799 evsel->nr_members > 1) {
1800 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1801 perf_evsel__name(evsel));
1803 nr = evsel->nr_members - 1;
1805 fprintf(fp, ",%s", perf_evsel__name(evsel));
1813 static void print_sample_time(struct feat_fd *ff, FILE *fp)
1815 struct perf_session *session;
1819 session = container_of(ff->ph, struct perf_session, header);
1821 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1822 time_buf, sizeof(time_buf));
1823 fprintf(fp, "# time of first sample : %s\n", time_buf);
1825 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1826 time_buf, sizeof(time_buf));
1827 fprintf(fp, "# time of last sample : %s\n", time_buf);
1829 d = (double)(session->evlist->last_sample_time -
1830 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1832 fprintf(fp, "# sample duration : %10.3f ms\n", d);
1835 static void memory_node__fprintf(struct memory_node *n,
1836 unsigned long long bsize, FILE *fp)
1838 char buf_map[100], buf_size[50];
1839 unsigned long long size;
1841 size = bsize * bitmap_weight(n->set, n->size);
1842 unit_number__scnprintf(buf_size, 50, size);
1844 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1845 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1848 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1850 struct memory_node *nodes;
1853 nodes = ff->ph->env.memory_nodes;
1854 nr = ff->ph->env.nr_memory_nodes;
1856 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1857 nr, ff->ph->env.memory_bsize);
1859 for (i = 0; i < nr; i++) {
1860 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1864 static int __event_process_build_id(struct build_id_event *bev,
1866 struct perf_session *session)
1869 struct machine *machine;
1872 enum dso_kernel_type dso_type;
1874 machine = perf_session__findnew_machine(session, bev->pid);
1878 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1881 case PERF_RECORD_MISC_KERNEL:
1882 dso_type = DSO_TYPE_KERNEL;
1884 case PERF_RECORD_MISC_GUEST_KERNEL:
1885 dso_type = DSO_TYPE_GUEST_KERNEL;
1887 case PERF_RECORD_MISC_USER:
1888 case PERF_RECORD_MISC_GUEST_USER:
1889 dso_type = DSO_TYPE_USER;
1895 dso = machine__findnew_dso(machine, filename);
1897 char sbuild_id[SBUILD_ID_SIZE];
1899 dso__set_build_id(dso, &bev->build_id);
1901 if (dso_type != DSO_TYPE_USER) {
1902 struct kmod_path m = { .name = NULL, };
1904 if (!kmod_path__parse_name(&m, filename) && m.kmod)
1905 dso__set_module_info(dso, &m, machine);
1907 dso->kernel = dso_type;
1912 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1914 pr_debug("build id event received for %s: %s\n",
1915 dso->long_name, sbuild_id);
1924 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1925 int input, u64 offset, u64 size)
1927 struct perf_session *session = container_of(header, struct perf_session, header);
1929 struct perf_event_header header;
1930 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1933 struct build_id_event bev;
1934 char filename[PATH_MAX];
1935 u64 limit = offset + size;
1937 while (offset < limit) {
1940 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1943 if (header->needs_swap)
1944 perf_event_header__bswap(&old_bev.header);
1946 len = old_bev.header.size - sizeof(old_bev);
1947 if (readn(input, filename, len) != len)
1950 bev.header = old_bev.header;
1953 * As the pid is the missing value, we need to fill
1954 * it properly. The header.misc value gives us a nice hint.
1956 bev.pid = HOST_KERNEL_ID;
1957 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1958 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1959 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1961 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1962 __event_process_build_id(&bev, filename, session);
1964 offset += bev.header.size;
1970 static int perf_header__read_build_ids(struct perf_header *header,
1971 int input, u64 offset, u64 size)
1973 struct perf_session *session = container_of(header, struct perf_session, header);
1974 struct build_id_event bev;
1975 char filename[PATH_MAX];
1976 u64 limit = offset + size, orig_offset = offset;
1979 while (offset < limit) {
1982 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1985 if (header->needs_swap)
1986 perf_event_header__bswap(&bev.header);
1988 len = bev.header.size - sizeof(bev);
1989 if (readn(input, filename, len) != len)
1992 * The a1645ce1 changeset:
1994 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1996 * Added a field to struct build_id_event that broke the file
1999 * Since the kernel build-id is the first entry, process the
2000 * table using the old format if the well known
2001 * '[kernel.kallsyms]' string for the kernel build-id has the
2002 * first 4 characters chopped off (where the pid_t sits).
2004 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2005 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2007 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2010 __event_process_build_id(&bev, filename, session);
2012 offset += bev.header.size;
2019 /* Macro for features that simply need to read and store a string. */
2020 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2021 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2023 ff->ph->env.__feat_env = do_read_string(ff); \
2024 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2027 FEAT_PROCESS_STR_FUN(hostname, hostname);
2028 FEAT_PROCESS_STR_FUN(osrelease, os_release);
2029 FEAT_PROCESS_STR_FUN(version, version);
2030 FEAT_PROCESS_STR_FUN(arch, arch);
2031 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2032 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2034 static int process_tracing_data(struct feat_fd *ff, void *data)
2036 ssize_t ret = trace_report(ff->fd, data, false);
2038 return ret < 0 ? -1 : 0;
2041 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2043 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2044 pr_debug("Failed to read buildids, continuing...\n");
2048 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2051 u32 nr_cpus_avail, nr_cpus_online;
2053 ret = do_read_u32(ff, &nr_cpus_avail);
2057 ret = do_read_u32(ff, &nr_cpus_online);
2060 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2061 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
2065 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2070 ret = do_read_u64(ff, &total_mem);
2073 ff->ph->env.total_mem = (unsigned long long)total_mem;
2077 static struct perf_evsel *
2078 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
2080 struct perf_evsel *evsel;
2082 evlist__for_each_entry(evlist, evsel) {
2083 if (evsel->idx == idx)
2091 perf_evlist__set_event_name(struct perf_evlist *evlist,
2092 struct perf_evsel *event)
2094 struct perf_evsel *evsel;
2099 evsel = perf_evlist__find_by_index(evlist, event->idx);
2106 evsel->name = strdup(event->name);
2110 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2112 struct perf_session *session;
2113 struct perf_evsel *evsel, *events = read_event_desc(ff);
2118 session = container_of(ff->ph, struct perf_session, header);
2120 if (session->data->is_pipe) {
2121 /* Save events for reading later by print_event_desc,
2122 * since they can't be read again in pipe mode. */
2123 ff->events = events;
2126 for (evsel = events; evsel->attr.size; evsel++)
2127 perf_evlist__set_event_name(session->evlist, evsel);
2129 if (!session->data->is_pipe)
2130 free_event_desc(events);
2135 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2137 char *str, *cmdline = NULL, **argv = NULL;
2140 if (do_read_u32(ff, &nr))
2143 ff->ph->env.nr_cmdline = nr;
2145 cmdline = zalloc(ff->size + nr + 1);
2149 argv = zalloc(sizeof(char *) * (nr + 1));
2153 for (i = 0; i < nr; i++) {
2154 str = do_read_string(ff);
2158 argv[i] = cmdline + len;
2159 memcpy(argv[i], str, strlen(str) + 1);
2160 len += strlen(str) + 1;
2163 ff->ph->env.cmdline = cmdline;
2164 ff->ph->env.cmdline_argv = (const char **) argv;
2173 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2178 int cpu_nr = ff->ph->env.nr_cpus_avail;
2180 struct perf_header *ph = ff->ph;
2181 bool do_core_id_test = true;
2183 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2187 if (do_read_u32(ff, &nr))
2190 ph->env.nr_sibling_cores = nr;
2191 size += sizeof(u32);
2192 if (strbuf_init(&sb, 128) < 0)
2195 for (i = 0; i < nr; i++) {
2196 str = do_read_string(ff);
2200 /* include a NULL character at the end */
2201 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2203 size += string_size(str);
2206 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2208 if (do_read_u32(ff, &nr))
2211 ph->env.nr_sibling_threads = nr;
2212 size += sizeof(u32);
2214 for (i = 0; i < nr; i++) {
2215 str = do_read_string(ff);
2219 /* include a NULL character at the end */
2220 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2222 size += string_size(str);
2225 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2228 * The header may be from an old perf,
2229 * which doesn't include core id and socket id information.
2231 if (ff->size <= size) {
2232 zfree(&ph->env.cpu);
2236 /* On s390 the socket_id number is not related to the number of CPUs.
2237 * The socket_id number might be higher than the number of CPUs.
2238 * This depends on the configuration.
2240 if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
2241 do_core_id_test = false;
2243 for (i = 0; i < (u32)cpu_nr; i++) {
2244 if (do_read_u32(ff, &nr))
2247 ph->env.cpu[i].core_id = nr;
2249 if (do_read_u32(ff, &nr))
2252 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2253 pr_debug("socket_id number is too big. "
2254 "You may need to upgrade the perf tool.\n");
2258 ph->env.cpu[i].socket_id = nr;
2264 strbuf_release(&sb);
2266 zfree(&ph->env.cpu);
2270 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2272 struct numa_node *nodes, *n;
2277 if (do_read_u32(ff, &nr))
2280 nodes = zalloc(sizeof(*nodes) * nr);
2284 for (i = 0; i < nr; i++) {
2288 if (do_read_u32(ff, &n->node))
2291 if (do_read_u64(ff, &n->mem_total))
2294 if (do_read_u64(ff, &n->mem_free))
2297 str = do_read_string(ff);
2301 n->map = cpu_map__new(str);
2307 ff->ph->env.nr_numa_nodes = nr;
2308 ff->ph->env.numa_nodes = nodes;
2316 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2323 if (do_read_u32(ff, &pmu_num))
2327 pr_debug("pmu mappings not available\n");
2331 ff->ph->env.nr_pmu_mappings = pmu_num;
2332 if (strbuf_init(&sb, 128) < 0)
2336 if (do_read_u32(ff, &type))
2339 name = do_read_string(ff);
2343 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2345 /* include a NULL character at the end */
2346 if (strbuf_add(&sb, "", 1) < 0)
2349 if (!strcmp(name, "msr"))
2350 ff->ph->env.msr_pmu_type = type;
2355 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2359 strbuf_release(&sb);
2363 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2366 u32 i, nr, nr_groups;
2367 struct perf_session *session;
2368 struct perf_evsel *evsel, *leader = NULL;
2375 if (do_read_u32(ff, &nr_groups))
2378 ff->ph->env.nr_groups = nr_groups;
2380 pr_debug("group desc not available\n");
2384 desc = calloc(nr_groups, sizeof(*desc));
2388 for (i = 0; i < nr_groups; i++) {
2389 desc[i].name = do_read_string(ff);
2393 if (do_read_u32(ff, &desc[i].leader_idx))
2396 if (do_read_u32(ff, &desc[i].nr_members))
2401 * Rebuild group relationship based on the group_desc
2403 session = container_of(ff->ph, struct perf_session, header);
2404 session->evlist->nr_groups = nr_groups;
2407 evlist__for_each_entry(session->evlist, evsel) {
2408 if (evsel->idx == (int) desc[i].leader_idx) {
2409 evsel->leader = evsel;
2410 /* {anon_group} is a dummy name */
2411 if (strcmp(desc[i].name, "{anon_group}")) {
2412 evsel->group_name = desc[i].name;
2413 desc[i].name = NULL;
2415 evsel->nr_members = desc[i].nr_members;
2417 if (i >= nr_groups || nr > 0) {
2418 pr_debug("invalid group desc\n");
2423 nr = evsel->nr_members - 1;
2426 /* This is a group member */
2427 evsel->leader = leader;
2433 if (i != nr_groups || nr != 0) {
2434 pr_debug("invalid group desc\n");
2440 for (i = 0; i < nr_groups; i++)
2441 zfree(&desc[i].name);
2447 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2449 struct perf_session *session;
2452 session = container_of(ff->ph, struct perf_session, header);
2454 err = auxtrace_index__process(ff->fd, ff->size, session,
2455 ff->ph->needs_swap);
2457 pr_err("Failed to process auxtrace index\n");
2461 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2463 struct cpu_cache_level *caches;
2464 u32 cnt, i, version;
2466 if (do_read_u32(ff, &version))
2472 if (do_read_u32(ff, &cnt))
2475 caches = zalloc(sizeof(*caches) * cnt);
2479 for (i = 0; i < cnt; i++) {
2480 struct cpu_cache_level c;
2483 if (do_read_u32(ff, &c.v))\
2484 goto out_free_caches; \
2493 c.v = do_read_string(ff); \
2495 goto out_free_caches;
2505 ff->ph->env.caches = caches;
2506 ff->ph->env.caches_cnt = cnt;
2513 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2515 struct perf_session *session;
2516 u64 first_sample_time, last_sample_time;
2519 session = container_of(ff->ph, struct perf_session, header);
2521 ret = do_read_u64(ff, &first_sample_time);
2525 ret = do_read_u64(ff, &last_sample_time);
2529 session->evlist->first_sample_time = first_sample_time;
2530 session->evlist->last_sample_time = last_sample_time;
2534 static int process_mem_topology(struct feat_fd *ff,
2535 void *data __maybe_unused)
2537 struct memory_node *nodes;
2538 u64 version, i, nr, bsize;
2541 if (do_read_u64(ff, &version))
2547 if (do_read_u64(ff, &bsize))
2550 if (do_read_u64(ff, &nr))
2553 nodes = zalloc(sizeof(*nodes) * nr);
2557 for (i = 0; i < nr; i++) {
2558 struct memory_node n;
2561 if (do_read_u64(ff, &n.v)) \
2569 if (do_read_bitmap(ff, &n.set, &n.size))
2575 ff->ph->env.memory_bsize = bsize;
2576 ff->ph->env.memory_nodes = nodes;
2577 ff->ph->env.nr_memory_nodes = nr;
2586 static int process_clockid(struct feat_fd *ff,
2587 void *data __maybe_unused)
2589 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2595 struct feature_ops {
2596 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
2597 void (*print)(struct feat_fd *ff, FILE *fp);
2598 int (*process)(struct feat_fd *ff, void *data);
2604 #define FEAT_OPR(n, func, __full_only) \
2606 .name = __stringify(n), \
2607 .write = write_##func, \
2608 .print = print_##func, \
2609 .full_only = __full_only, \
2610 .process = process_##func, \
2611 .synthesize = true \
2614 #define FEAT_OPN(n, func, __full_only) \
2616 .name = __stringify(n), \
2617 .write = write_##func, \
2618 .print = print_##func, \
2619 .full_only = __full_only, \
2620 .process = process_##func \
2623 /* feature_ops not implemented: */
2624 #define print_tracing_data NULL
2625 #define print_build_id NULL
2627 #define process_branch_stack NULL
2628 #define process_stat NULL
2631 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2632 FEAT_OPN(TRACING_DATA, tracing_data, false),
2633 FEAT_OPN(BUILD_ID, build_id, false),
2634 FEAT_OPR(HOSTNAME, hostname, false),
2635 FEAT_OPR(OSRELEASE, osrelease, false),
2636 FEAT_OPR(VERSION, version, false),
2637 FEAT_OPR(ARCH, arch, false),
2638 FEAT_OPR(NRCPUS, nrcpus, false),
2639 FEAT_OPR(CPUDESC, cpudesc, false),
2640 FEAT_OPR(CPUID, cpuid, false),
2641 FEAT_OPR(TOTAL_MEM, total_mem, false),
2642 FEAT_OPR(EVENT_DESC, event_desc, false),
2643 FEAT_OPR(CMDLINE, cmdline, false),
2644 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
2645 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
2646 FEAT_OPN(BRANCH_STACK, branch_stack, false),
2647 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
2648 FEAT_OPR(GROUP_DESC, group_desc, false),
2649 FEAT_OPN(AUXTRACE, auxtrace, false),
2650 FEAT_OPN(STAT, stat, false),
2651 FEAT_OPN(CACHE, cache, true),
2652 FEAT_OPR(SAMPLE_TIME, sample_time, false),
2653 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
2654 FEAT_OPR(CLOCKID, clockid, false)
2657 struct header_print_data {
2659 bool full; /* extended list of headers */
2662 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2663 struct perf_header *ph,
2664 int feat, int fd, void *data)
2666 struct header_print_data *hd = data;
2669 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2670 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2671 "%d, continuing...\n", section->offset, feat);
2674 if (feat >= HEADER_LAST_FEATURE) {
2675 pr_warning("unknown feature %d\n", feat);
2678 if (!feat_ops[feat].print)
2681 ff = (struct feat_fd) {
2686 if (!feat_ops[feat].full_only || hd->full)
2687 feat_ops[feat].print(&ff, hd->fp);
2689 fprintf(hd->fp, "# %s info available, use -I to display\n",
2690 feat_ops[feat].name);
2695 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2697 struct header_print_data hd;
2698 struct perf_header *header = &session->header;
2699 int fd = perf_data__fd(session->data);
2707 ret = fstat(fd, &st);
2711 stctime = st.st_ctime;
2712 fprintf(fp, "# captured on : %s", ctime(&stctime));
2714 fprintf(fp, "# header version : %u\n", header->version);
2715 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
2716 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
2717 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
2719 perf_header__process_sections(header, fd, &hd,
2720 perf_file_section__fprintf_info);
2722 if (session->data->is_pipe)
2725 fprintf(fp, "# missing features: ");
2726 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2728 fprintf(fp, "%s ", feat_ops[bit].name);
2735 static int do_write_feat(struct feat_fd *ff, int type,
2736 struct perf_file_section **p,
2737 struct perf_evlist *evlist)
2742 if (perf_header__has_feat(ff->ph, type)) {
2743 if (!feat_ops[type].write)
2746 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2749 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
2751 err = feat_ops[type].write(ff, evlist);
2753 pr_debug("failed to write feature %s\n", feat_ops[type].name);
2755 /* undo anything written */
2756 lseek(ff->fd, (*p)->offset, SEEK_SET);
2760 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
2766 static int perf_header__adds_write(struct perf_header *header,
2767 struct perf_evlist *evlist, int fd)
2771 struct perf_file_section *feat_sec, *p;
2777 ff = (struct feat_fd){
2782 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2786 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2787 if (feat_sec == NULL)
2790 sec_size = sizeof(*feat_sec) * nr_sections;
2792 sec_start = header->feat_offset;
2793 lseek(fd, sec_start + sec_size, SEEK_SET);
2795 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2796 if (do_write_feat(&ff, feat, &p, evlist))
2797 perf_header__clear_feat(header, feat);
2800 lseek(fd, sec_start, SEEK_SET);
2802 * may write more than needed due to a dropped feature, but
2803 * this is okay, the reader will skip the missing entries
2805 err = do_write(&ff, feat_sec, sec_size);
2807 pr_debug("failed to write feature section\n");
2812 int perf_header__write_pipe(int fd)
2814 struct perf_pipe_file_header f_header;
2818 ff = (struct feat_fd){ .fd = fd };
2820 f_header = (struct perf_pipe_file_header){
2821 .magic = PERF_MAGIC,
2822 .size = sizeof(f_header),
2825 err = do_write(&ff, &f_header, sizeof(f_header));
2827 pr_debug("failed to write perf pipe header\n");
2834 int perf_session__write_header(struct perf_session *session,
2835 struct perf_evlist *evlist,
2836 int fd, bool at_exit)
2838 struct perf_file_header f_header;
2839 struct perf_file_attr f_attr;
2840 struct perf_header *header = &session->header;
2841 struct perf_evsel *evsel;
2846 ff = (struct feat_fd){ .fd = fd};
2847 lseek(fd, sizeof(f_header), SEEK_SET);
2849 evlist__for_each_entry(session->evlist, evsel) {
2850 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2851 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
2853 pr_debug("failed to write perf header\n");
2858 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
2860 evlist__for_each_entry(evlist, evsel) {
2861 f_attr = (struct perf_file_attr){
2862 .attr = evsel->attr,
2864 .offset = evsel->id_offset,
2865 .size = evsel->ids * sizeof(u64),
2868 err = do_write(&ff, &f_attr, sizeof(f_attr));
2870 pr_debug("failed to write perf header attribute\n");
2875 if (!header->data_offset)
2876 header->data_offset = lseek(fd, 0, SEEK_CUR);
2877 header->feat_offset = header->data_offset + header->data_size;
2880 err = perf_header__adds_write(header, evlist, fd);
2885 f_header = (struct perf_file_header){
2886 .magic = PERF_MAGIC,
2887 .size = sizeof(f_header),
2888 .attr_size = sizeof(f_attr),
2890 .offset = attr_offset,
2891 .size = evlist->nr_entries * sizeof(f_attr),
2894 .offset = header->data_offset,
2895 .size = header->data_size,
2897 /* event_types is ignored, store zeros */
2900 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2902 lseek(fd, 0, SEEK_SET);
2903 err = do_write(&ff, &f_header, sizeof(f_header));
2905 pr_debug("failed to write perf header\n");
2908 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2913 static int perf_header__getbuffer64(struct perf_header *header,
2914 int fd, void *buf, size_t size)
2916 if (readn(fd, buf, size) <= 0)
2919 if (header->needs_swap)
2920 mem_bswap_64(buf, size);
2925 int perf_header__process_sections(struct perf_header *header, int fd,
2927 int (*process)(struct perf_file_section *section,
2928 struct perf_header *ph,
2929 int feat, int fd, void *data))
2931 struct perf_file_section *feat_sec, *sec;
2937 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2941 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2945 sec_size = sizeof(*feat_sec) * nr_sections;
2947 lseek(fd, header->feat_offset, SEEK_SET);
2949 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2953 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2954 err = process(sec++, header, feat, fd, data);
2964 static const int attr_file_abi_sizes[] = {
2965 [0] = PERF_ATTR_SIZE_VER0,
2966 [1] = PERF_ATTR_SIZE_VER1,
2967 [2] = PERF_ATTR_SIZE_VER2,
2968 [3] = PERF_ATTR_SIZE_VER3,
2969 [4] = PERF_ATTR_SIZE_VER4,
2974 * In the legacy file format, the magic number does not encode endianness;
2975 * hdr_sz was used for that instead. But given that hdr_sz can vary based
2976 * on ABI revisions, we need to try all combinations for both endiannesses
2977 * to detect the right one.
2979 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2981 uint64_t ref_size, attr_size;
2984 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2985 ref_size = attr_file_abi_sizes[i]
2986 + sizeof(struct perf_file_section);
2987 if (hdr_sz != ref_size) {
2988 attr_size = bswap_64(hdr_sz);
2989 if (attr_size != ref_size)
2992 ph->needs_swap = true;
2994 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2999 /* could not determine endianness */
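/*
 * Illustration (sizes as defined in perf_event.h at the time of writing):
 * an ABI0 file has hdr_sz == PERF_ATTR_SIZE_VER0 (64) + sizeof(struct
 * perf_file_section) (16) == 80.  If hdr_sz matches neither 80 nor
 * bswap_64(80), the loop above tries the VER1..VER4 sizes before giving up.
 */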
3003 #define PERF_PIPE_HDR_VER0 16
3005 static const size_t attr_pipe_abi_sizes[] = {
3006 [0] = PERF_PIPE_HDR_VER0,
3011 * In the legacy pipe format, there is an implicit assumption that the endianness
3012 * of the host recording the samples and the host parsing the samples is the
3013 * same. This is not always the case, given that the pipe output may be
3014 * redirected into a file and analyzed on a different machine with possibly a
3015 * different endianness and perf_event ABI revisions in the perf tool itself.
3017 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3022 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3023 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3024 attr_size = bswap_64(hdr_sz);
3025 if (attr_size != hdr_sz)
3028 ph->needs_swap = true;
3030 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3036 bool is_perf_magic(u64 magic)
3038 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3039 || magic == __perf_magic2
3040 || magic == __perf_magic2_sw)
3046 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3047 bool is_pipe, struct perf_header *ph)
3051 /* check for legacy format */
3052 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
3054 ph->version = PERF_HEADER_VERSION_1;
3055 pr_debug("legacy perf.data format\n");
3057 return try_all_pipe_abis(hdr_sz, ph);
3059 return try_all_file_abis(hdr_sz, ph);
3062 * the new magic number serves two purposes:
3063 * - unique number to identify actual perf.data files
3064 * - encode endianness of file
3066 ph->version = PERF_HEADER_VERSION_2;
3068 /* check magic number with one endianness */
3069 if (magic == __perf_magic2)
3072 /* check magic number with opposite endianness */
3073 if (magic != __perf_magic2_sw)
3076 ph->needs_swap = true;
3081 int perf_file_header__read(struct perf_file_header *header,
3082 struct perf_header *ph, int fd)
3086 lseek(fd, 0, SEEK_SET);
3088 ret = readn(fd, header, sizeof(*header));
3092 if (check_magic_endian(header->magic,
3093 header->attr_size, false, ph) < 0) {
3094 pr_debug("magic/endian check failed\n");
3098 if (ph->needs_swap) {
3099 mem_bswap_64(header, offsetof(struct perf_file_header,
3103 if (header->size != sizeof(*header)) {
3104 /* Support the previous format */
3105 if (header->size == offsetof(typeof(*header), adds_features))
3106 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3109 } else if (ph->needs_swap) {
3111 * feature bitmap is declared as an array of unsigned longs --
3112 * not good since its size can differ between the host that
3113 * generated the data file and the host analyzing the file.
3115 * We need to handle endianness, but we don't know the size of
3116 * the unsigned long where the file was generated. Take a best
3117 * guess at determining it: try 64-bit swap first (i.e., file
3118 * created on a 64-bit host), and check if the hostname feature
3119 * bit is set (this feature bit is forced on as of fbe96f2).
3120 * If the bit is not set, undo the 64-bit swap and try a 32-bit
3121 * swap. If the hostname bit is still not set (e.g., older data
3122 * file), punt and fall back to the original behavior --
3123 * clearing all feature bits and setting buildid.
3125 mem_bswap_64(&header->adds_features,
3126 BITS_TO_U64(HEADER_FEAT_BITS));
3128 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3130 mem_bswap_64(&header->adds_features,
3131 BITS_TO_U64(HEADER_FEAT_BITS));
3134 mem_bswap_32(&header->adds_features,
3135 BITS_TO_U32(HEADER_FEAT_BITS));
3138 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3139 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3140 set_bit(HEADER_BUILD_ID, header->adds_features);
3144 memcpy(&ph->adds_features, &header->adds_features,
3145 sizeof(ph->adds_features));
3147 ph->data_offset = header->data.offset;
3148 ph->data_size = header->data.size;
3149 ph->feat_offset = header->data.offset + header->data.size;
3153 static int perf_file_section__process(struct perf_file_section *section,
3154 struct perf_header *ph,
3155 int feat, int fd, void *data)
3157 struct feat_fd fdd = {
3160 .size = section->size,
3161 .offset = section->offset,
3164 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3165 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3166 "%d, continuing...\n", section->offset, feat);
3170 if (feat >= HEADER_LAST_FEATURE) {
3171 pr_debug("unknown feature %d, continuing...\n", feat);
3175 if (!feat_ops[feat].process)
3178 return feat_ops[feat].process(&fdd, data);
3181 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
3182 struct perf_header *ph, int fd,
3185 struct feat_fd ff = {
3186 .fd = STDOUT_FILENO,
3191 ret = readn(fd, header, sizeof(*header));
3195 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3196 pr_debug("endian/magic failed\n");
3201 header->size = bswap_64(header->size);
3203 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
3209 static int perf_header__read_pipe(struct perf_session *session)
3211 struct perf_header *header = &session->header;
3212 struct perf_pipe_file_header f_header;
3214 if (perf_file_header__read_pipe(&f_header, header,
3215 perf_data__fd(session->data),
3216 session->repipe) < 0) {
3217 pr_debug("incompatible file format\n");
3224 static int read_attr(int fd, struct perf_header *ph,
3225 struct perf_file_attr *f_attr)
3227 struct perf_event_attr *attr = &f_attr->attr;
3229 size_t our_sz = sizeof(f_attr->attr);
3232 memset(f_attr, 0, sizeof(*f_attr));
3234 /* read minimal guaranteed structure */
3235 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3237 pr_debug("cannot read %d bytes of header attr\n",
3238 PERF_ATTR_SIZE_VER0);
3242 /* on file perf_event_attr size */
3250 sz = PERF_ATTR_SIZE_VER0;
3251 } else if (sz > our_sz) {
3252 pr_debug("file uses a more recent and unsupported ABI"
3253 " (%zu bytes extra)\n", sz - our_sz);
3256 /* what we have not yet read and that we know about */
3257 left = sz - PERF_ATTR_SIZE_VER0;
3260 ptr += PERF_ATTR_SIZE_VER0;
3262 ret = readn(fd, ptr, left);
3264 /* read perf_file_section, ids are read in caller */
3265 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3267 return ret <= 0 ? -1 : 0;
3270 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
3271 struct tep_handle *pevent)
3273 struct tep_event *event;
3276 /* already prepared */
3277 if (evsel->tp_format)
3280 if (pevent == NULL) {
3281 pr_debug("broken or missing trace data\n");
3285 event = tep_find_event(pevent, evsel->attr.config);
3286 if (event == NULL) {
3287 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
3292 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3293 evsel->name = strdup(bf);
3294 if (evsel->name == NULL)
3298 evsel->tp_format = event;
3302 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
3303 struct tep_handle *pevent)
3305 struct perf_evsel *pos;
3307 evlist__for_each_entry(evlist, pos) {
3308 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
3309 perf_evsel__prepare_tracepoint_event(pos, pevent))
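/*
 * Top-level reader for a perf.data file (or pipe): parse the file
 * header, re-create the evlist from the on-disk attrs and their ids,
 * then walk the feature sections and prepare tracepoint events.
 */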
3316 int perf_session__read_header(struct perf_session *session)
3318 struct perf_data *data = session->data;
3319 struct perf_header *header = &session->header;
3320 struct perf_file_header f_header;
3321 struct perf_file_attr f_attr;
3323 int nr_attrs, nr_ids, i, j;
3324 int fd = perf_data__fd(data);
3326 session->evlist = perf_evlist__new();
3327 if (session->evlist == NULL)
3330 session->evlist->env = &header->env;
3331 session->machines.host.env = &header->env;
3332 if (perf_data__is_pipe(data))
3333 return perf_header__read_pipe(session);
3335 if (perf_file_header__read(&f_header, header, fd) < 0)
3339 * Sanity check that perf.data was written cleanly; data size is
3340 * initialized to 0 and updated only if the on_exit function is run.
3341 * If data size is still 0 then the file contains only partial
3342 * information. Just warn the user and process as much of it as possible.
3344 if (f_header.data.size == 0) {
3345 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
3346 "Was the 'perf record' command properly terminated?\n",
3350 nr_attrs = f_header.attrs.size / f_header.attr_size;
3351 lseek(fd, f_header.attrs.offset, SEEK_SET);
3353 for (i = 0; i < nr_attrs; i++) {
3354 struct perf_evsel *evsel;
3357 if (read_attr(fd, header, &f_attr) < 0)
3360 if (header->needs_swap) {
3361 f_attr.ids.size = bswap_64(f_attr.ids.size);
3362 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
3363 perf_event__attr_swap(&f_attr.attr);
3366 tmp = lseek(fd, 0, SEEK_CUR);
3367 evsel = perf_evsel__new(&f_attr.attr);
3370 goto out_delete_evlist;
3372 evsel->needs_swap = header->needs_swap;
3374 * Do it before so that if perf_evsel__alloc_id fails, this
3375 * entry gets purged too at perf_evlist__delete().
3377 perf_evlist__add(session->evlist, evsel);
3379 nr_ids = f_attr.ids.size / sizeof(u64);
3381 * We don't have the cpu and thread maps in the header, so
3382 * for allocating the perf_sample_id table we fake 1 cpu and
3383 * nr_ids threads.
3385 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
3386 goto out_delete_evlist;
3388 lseek(fd, f_attr.ids.offset, SEEK_SET);
3390 for (j = 0; j < nr_ids; j++) {
3391 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
3394 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
3397 lseek(fd, tmp, SEEK_SET);
3400 perf_header__process_sections(header, fd, &session->tevent,
3401 perf_file_section__process);
3403 if (perf_evlist__prepare_tracepoint_events(session->evlist,
3404 session->tevent.pevent))
3405 goto out_delete_evlist;
3412 perf_evlist__delete(session->evlist);
3413 session->evlist = NULL;
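/*
 * Build a PERF_RECORD_HEADER_ATTR event:
 *
 *	struct perf_event_header	header;
 *	struct perf_event_attr		attr;	(size u64-aligned)
 *	u64				id[ids];
 *
 * The total must fit into the u16 header.size field, which is verified
 * before the event is handed to process().
 */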
3417 int perf_event__synthesize_attr(struct perf_tool *tool,
3418 struct perf_event_attr *attr, u32 ids, u64 *id,
3419 perf_event__handler_t process)
3421 union perf_event *ev;
3425 size = sizeof(struct perf_event_attr);
3426 size = PERF_ALIGN(size, sizeof(u64));
3427 size += sizeof(struct perf_event_header);
3428 size += ids * sizeof(u64);
3435 ev->attr.attr = *attr;
3436 memcpy(ev->attr.id, id, ids * sizeof(u64));
3438 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
3439 ev->attr.header.size = (u16)size;
3441 if (ev->attr.header.size == size)
3442 err = process(tool, ev, NULL, NULL);
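/*
 * Emit one PERF_RECORD_HEADER_FEATURE event for each set feature bit
 * that supports synthesis, writing the feature payload into an
 * in-memory feat_fd buffer, and finish with a HEADER_LAST_FEATURE
 * marker. perf_event__process_feature() below is the consumer.
 */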
3451 int perf_event__synthesize_features(struct perf_tool *tool,
3452 struct perf_session *session,
3453 struct perf_evlist *evlist,
3454 perf_event__handler_t process)
3456 struct perf_header *header = &session->header;
3458 struct feature_event *fe;
3462 sz_hdr = sizeof(fe->header);
3463 sz = sizeof(union perf_event);
3464 /* get a nice alignment */
3465 sz = PERF_ALIGN(sz, page_size);
3467 memset(&ff, 0, sizeof(ff));
3469 ff.buf = malloc(sz);
3473 ff.size = sz - sz_hdr;
3475 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3476 if (!feat_ops[feat].synthesize) {
3477 pr_debug("No record header feature for header: %d\n", feat);
3481 ff.offset = sizeof(*fe);
3483 ret = feat_ops[feat].write(&ff, evlist);
3484 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
3485 pr_debug("Error writing feature\n");
3488 /* ff.buf may have changed due to realloc in do_write() */
3490 memset(fe, 0, sizeof(*fe));
3493 fe->header.type = PERF_RECORD_HEADER_FEATURE;
3494 fe->header.size = ff.offset;
3496 ret = process(tool, ff.buf, NULL, NULL);
3503 /* Send HEADER_LAST_FEATURE mark. */
3505 fe->feat_id = HEADER_LAST_FEATURE;
3506 fe->header.type = PERF_RECORD_HEADER_FEATURE;
3507 fe->header.size = sizeof(*fe);
3509 ret = process(tool, ff.buf, NULL, NULL);
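/*
 * Consumer side of perf_event__synthesize_features(): validate the
 * record type and feature id, run the feature's ->process() callback on
 * the in-event payload and optionally print it, honouring
 * tool->show_feat_hdr.
 */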
3515 int perf_event__process_feature(struct perf_session *session,
3516 union perf_event *event)
3518 struct perf_tool *tool = session->tool;
3519 struct feat_fd ff = { .fd = 0 };
3520 struct feature_event *fe = (struct feature_event *)event;
3521 int type = fe->header.type;
3522 u64 feat = fe->feat_id;
3524 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3525 pr_warning("invalid record type %d in pipe-mode\n", type);
3528 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
3529 pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
3533 if (!feat_ops[feat].process)
3536 ff.buf = (void *)fe->data;
3537 ff.size = event->header.size - sizeof(event->header);
3538 ff.ph = &session->header;
3540 if (feat_ops[feat].process(&ff, NULL))
3543 if (!feat_ops[feat].print || !tool->show_feat_hdr)
3546 if (!feat_ops[feat].full_only ||
3547 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3548 feat_ops[feat].print(&ff, stdout);
3550 fprintf(stdout, "# %s info available, use -I to display\n",
3551 feat_ops[feat].name);
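/*
 * Allocate a PERF_RECORD_EVENT_UPDATE event with room for "size" bytes
 * of payload, rounded up to a u64 boundary.
 */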
3557 static struct event_update_event *
3558 event_update_event__new(size_t size, u64 type, u64 id)
3560 struct event_update_event *ev;
3562 size += sizeof(*ev);
3563 size = PERF_ALIGN(size, sizeof(u64));
3567 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3568 ev->header.size = (u16)size;
3576 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3577 struct perf_evsel *evsel,
3578 perf_event__handler_t process)
3580 struct event_update_event *ev;
3581 size_t size = strlen(evsel->unit);
3584 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3588 strlcpy(ev->data, evsel->unit, size + 1);
3589 err = process(tool, (union perf_event *)ev, NULL, NULL);
3595 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3596 struct perf_evsel *evsel,
3597 perf_event__handler_t process)
3599 struct event_update_event *ev;
3600 struct event_update_event_scale *ev_data;
3603 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3607 ev_data = (struct event_update_event_scale *) ev->data;
3608 ev_data->scale = evsel->scale;
3609 err = process(tool, (union perf_event*) ev, NULL, NULL);
3615 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3616 struct perf_evsel *evsel,
3617 perf_event__handler_t process)
3619 struct event_update_event *ev;
3620 size_t len = strlen(evsel->name);
3623 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3627 strlcpy(ev->data, evsel->name, len + 1);
3628 err = process(tool, (union perf_event*) ev, NULL, NULL);
3634 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3635 struct perf_evsel *evsel,
3636 perf_event__handler_t process)
3638 size_t size = sizeof(struct event_update_event);
3639 struct event_update_event *ev;
3643 if (!evsel->own_cpus)
3646 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3650 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3651 ev->header.size = (u16)size;
3652 ev->type = PERF_EVENT_UPDATE__CPUS;
3653 ev->id = evsel->id[0];
3655 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3659 err = process(tool, (union perf_event*) ev, NULL, NULL);
3664 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3666 struct event_update_event *ev = &event->event_update;
3667 struct event_update_event_scale *ev_scale;
3668 struct event_update_event_cpus *ev_cpus;
3669 struct cpu_map *map;
3672 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
3675 case PERF_EVENT_UPDATE__SCALE:
3676 ev_scale = (struct event_update_event_scale *) ev->data;
3677 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3679 case PERF_EVENT_UPDATE__UNIT:
3680 ret += fprintf(fp, "... unit: %s\n", ev->data);
3682 case PERF_EVENT_UPDATE__NAME:
3683 ret += fprintf(fp, "... name: %s\n", ev->data);
3685 case PERF_EVENT_UPDATE__CPUS:
3686 ev_cpus = (struct event_update_event_cpus *) ev->data;
3687 ret += fprintf(fp, "... ");
3689 map = cpu_map__new_data(&ev_cpus->cpus);
3691 ret += cpu_map__fprintf(map, fp);
3693 ret += fprintf(fp, "failed to get cpus\n");
3696 ret += fprintf(fp, "... unknown type\n");
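/*
 * Synthesize one PERF_RECORD_HEADER_ATTR event per evsel in the evlist.
 */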
3703 int perf_event__synthesize_attrs(struct perf_tool *tool,
3704 struct perf_evlist *evlist,
3705 perf_event__handler_t process)
3707 struct perf_evsel *evsel;
3710 evlist__for_each_entry(evlist, evsel) {
3711 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3712 evsel->id, process);
3714 pr_debug("failed to create perf header attribute\n");
3722 static bool has_unit(struct perf_evsel *counter)
3724 return counter->unit && *counter->unit;
3727 static bool has_scale(struct perf_evsel *counter)
3729 return counter->scale != 1;
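/*
 * Emit PERF_RECORD_EVENT_UPDATE records for the details the attr event
 * does not carry: unit, scale, own cpus and (for pipe output) the name.
 */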
3732 int perf_event__synthesize_extra_attr(struct perf_tool *tool,
3733 struct perf_evlist *evsel_list,
3734 perf_event__handler_t process,
3737 struct perf_evsel *counter;
3741 * Synthesize other event details not carried within the
3742 * attr event - unit, scale, name.
3744 evlist__for_each_entry(evsel_list, counter) {
3745 if (!counter->supported)
3749 * Synthesize unit and scale only if they are defined.
3751 if (has_unit(counter)) {
3752 err = perf_event__synthesize_event_update_unit(tool, counter, process);
3754 pr_err("Couldn't synthesize evsel unit.\n");
3759 if (has_scale(counter)) {
3760 err = perf_event__synthesize_event_update_scale(tool, counter, process);
3762 pr_err("Couldn't synthesize evsel scale.\n");
3767 if (counter->own_cpus) {
3768 err = perf_event__synthesize_event_update_cpus(tool, counter, process);
3770 pr_err("Couldn't synthesize evsel cpus.\n");
3776 * Name is needed only for pipe output,
3777 * as perf.data already carries event names.
3780 err = perf_event__synthesize_event_update_name(tool, counter, process);
3782 pr_err("Couldn't synthesize evsel name.\n");
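/*
 * Consumer of PERF_RECORD_HEADER_ATTR: re-create the evsel from the
 * in-event attr and register the trailing ids; the id count is derived
 * from the record size, mirroring the layout built by
 * perf_event__synthesize_attr().
 */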
3790 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3791 union perf_event *event,
3792 struct perf_evlist **pevlist)
3795 struct perf_evsel *evsel;
3796 struct perf_evlist *evlist = *pevlist;
3798 if (evlist == NULL) {
3799 *pevlist = evlist = perf_evlist__new();
3804 evsel = perf_evsel__new(&event->attr.attr);
3808 perf_evlist__add(evlist, evsel);
3810 ids = event->header.size;
3811 ids -= (void *)&event->attr.id - (void *)event;
3812 n_ids = ids / sizeof(u64);
3814 * We don't have the cpu and thread maps in the header, so
3815 * for allocating the perf_sample_id table we fake 1 cpu and
3816 * n_ids threads.
3818 if (perf_evsel__alloc_id(evsel, 1, n_ids))
3821 for (i = 0; i < n_ids; i++) {
3822 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
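/*
 * Apply an EVENT_UPDATE record to the matching evsel: unit, name and
 * scale are copied verbatim, cpus are rebuilt into a cpu_map.
 */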
3828 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3829 union perf_event *event,
3830 struct perf_evlist **pevlist)
3832 struct event_update_event *ev = &event->event_update;
3833 struct event_update_event_scale *ev_scale;
3834 struct event_update_event_cpus *ev_cpus;
3835 struct perf_evlist *evlist;
3836 struct perf_evsel *evsel;
3837 struct cpu_map *map;
3839 if (!pevlist || *pevlist == NULL)
3844 evsel = perf_evlist__id2evsel(evlist, ev->id);
3849 case PERF_EVENT_UPDATE__UNIT:
3850 evsel->unit = strdup(ev->data);
3852 case PERF_EVENT_UPDATE__NAME:
3853 evsel->name = strdup(ev->data);
3855 case PERF_EVENT_UPDATE__SCALE:
3856 ev_scale = (struct event_update_event_scale *) ev->data;
3857 evsel->scale = ev_scale->scale;
3859 case PERF_EVENT_UPDATE__CPUS:
3860 ev_cpus = (struct event_update_event_cpus *) ev->data;
3862 map = cpu_map__new_data(&ev_cpus->cpus);
3864 evsel->own_cpus = map;
3866 pr_err("failed to get event_update cpus\n");
3874 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3875 struct perf_evlist *evlist,
3876 perf_event__handler_t process)
3878 union perf_event ev;
3879 struct tracing_data *tdata;
3880 ssize_t size = 0, aligned_size = 0, padding;
3882 int err __maybe_unused = 0;
3885 * We are going to store the size of the data followed
3886 * by the data contents. Since the output fd is a pipe,
3887 * we cannot seek back to store the size of the data once
3888 * we know it. Instead we:
3890 * - write the tracing data to the temp file
3891 * - get/write the data size to pipe
3892 * - write the tracing data from the temp file
3895 tdata = tracing_data_get(&evlist->entries, fd, true);
3899 memset(&ev, 0, sizeof(ev));
3901 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3903 aligned_size = PERF_ALIGN(size, sizeof(u64));
3904 padding = aligned_size - size;
3905 ev.tracing_data.header.size = sizeof(ev.tracing_data);
3906 ev.tracing_data.size = aligned_size;
3908 process(tool, &ev, NULL, NULL);
3911 * The put function will copy all the tracing data
3912 * stored in temp file to the pipe.
3914 tracing_data_put(tdata);
3916 ff = (struct feat_fd){ .fd = fd };
3917 if (write_padded(&ff, NULL, 0, padding))
3920 return aligned_size;
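/*
 * Counterpart of perf_event__synthesize_tracing_data(): read the
 * tracing data that follows the record in the stream, consume the u64
 * alignment padding (repiping it when requested) and verify the total
 * matches the size announced in the event.
 */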
3923 int perf_event__process_tracing_data(struct perf_session *session,
3924 union perf_event *event)
3926 ssize_t size_read, padding, size = event->tracing_data.size;
3927 int fd = perf_data__fd(session->data);
3928 off_t offset = lseek(fd, 0, SEEK_CUR);
3931 /* set up for reading amidst mmap */
3932 lseek(fd, offset + sizeof(struct tracing_data_event),
3935 size_read = trace_report(fd, &session->tevent,
3937 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3939 if (readn(fd, buf, padding) < 0) {
3940 pr_err("%s: reading input file\n", __func__);
3943 if (session->repipe) {
3944 int retw = write(STDOUT_FILENO, buf, padding);
3945 if (retw <= 0 || retw != padding) {
3946 pr_err("%s: repiping tracing data padding\n", __func__);
3951 if (size_read + padding != size) {
3952 pr_err("%s: tracing data size mismatch\n", __func__);
3956 perf_evlist__prepare_tracepoint_events(session->evlist,
3957 session->tevent.pevent);
3959 return size_read + padding;
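/*
 * Build a PERF_RECORD_HEADER_BUILD_ID event carrying the dso's build id
 * and its long name, padded to NAME_ALIGN.
 */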
3962 int perf_event__synthesize_build_id(struct perf_tool *tool,
3963 struct dso *pos, u16 misc,
3964 perf_event__handler_t process,
3965 struct machine *machine)
3967 union perf_event ev;
3974 memset(&ev, 0, sizeof(ev));
3976 len = pos->long_name_len + 1;
3977 len = PERF_ALIGN(len, NAME_ALIGN);
3978 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3979 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3980 ev.build_id.header.misc = misc;
3981 ev.build_id.pid = machine->pid;
3982 ev.build_id.header.size = sizeof(ev.build_id) + len;
3983 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3985 err = process(tool, &ev, NULL, machine);
3990 int perf_event__process_build_id(struct perf_session *session,
3991 union perf_event *event)
3993 __event_process_build_id(&event->build_id,
3994 event->build_id.filename,