1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/compiler.h>
13 #include <linux/list.h>
14 #include <linux/kernel.h>
15 #include <linux/bitops.h>
16 #include <linux/stringify.h>
18 #include <sys/utsname.h>
19 #include <linux/time64.h>
27 #include "trace-event.h"
37 #include <api/fs/fs.h>
40 #include "time-utils.h"
44 #include "sane_ctype.h"
/*
 * magic2 ("PERFILE2") must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file;
 * additionally we check for legacy (PERFFILE) format.
 */
55 static const char *__perf_magic1 = "PERFFILE";
56 static const u64 __perf_magic2    = 0x32454c4946524550ULL; /* "PERFILE2", host order */
57 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; /* byte-swapped magic2 */
59 #define PERF_MAGIC	__perf_magic2
/* Version string embedded at build time; written/printed in the header. */
61 const char perf_version_string[] = PERF_VERSION;
/* On-file record: an event attr followed by the file section holding its ids. */
63 struct perf_file_attr {
64 struct perf_event_attr attr;
65 struct perf_file_section ids;
/* NOTE(review): the following fields belong to struct feat_fd (header I/O
 * context); intervening lines are elided in this excerpt. */
69 struct perf_header *ph;
71 void *buf; /* Either buf != NULL or fd >= 0 */
74 struct perf_evsel *events;
/* Mark feature 'feat' as present in the header's feature bitmap. */
77 void perf_header__set_feat(struct perf_header *header, int feat)
79 set_bit(feat, header->adds_features);
/* Remove feature 'feat' from the header's feature bitmap. */
82 void perf_header__clear_feat(struct perf_header *header, int feat)
84 clear_bit(feat, header->adds_features);
/* Test whether feature 'feat' is recorded in the header's feature bitmap. */
87 bool perf_header__has_feat(const struct perf_header *header, int feat)
89 return test_bit(feat, header->adds_features);
/*
 * Write 'size' bytes to the feat_fd's file descriptor.
 * Returns 0 on success, the negative errno from writen() on error,
 * or -1 on a short write.
 */
92 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
94 ssize_t ret = writen(ff->fd, buf, size);
96 if (ret != (ssize_t)size)
97 return ret < 0 ? (int)ret : -1;
/*
 * Append 'size' bytes to the feat_fd's in-memory buffer (pipe mode),
 * growing it as needed. The total is capped so that the resulting
 * record size still fits in perf_event_header::size (u16).
 */
101 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
103 /* struct perf_event_header::size is u16 */
104 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
105 size_t new_size = ff->size;
108 if (size + ff->offset > max_size)
/* Grow geometrically until the pending write fits, then clamp to the cap. */
111 while (size > (new_size - ff->offset))
113 new_size = min(max_size, new_size);
115 if (ff->size < new_size) {
/* NOTE(review): realloc-failure handling is elided in this excerpt — confirm
 * addr is checked before ff->buf is replaced. */
116 addr = realloc(ff->buf, new_size);
123 memcpy(ff->buf + ff->offset, buf, size);
129 /* Return: 0 if succeeded, -ERR if failed. */
/* Dispatch to fd-backed or buffer-backed write depending on feat_fd mode. */
130 int do_write(struct feat_fd *ff, const void *buf, size_t size)
133 return __do_write_fd(ff, buf, size);
134 return __do_write_buf(ff, buf, size);
137 /* Return: 0 if succeeded, -ERR if failed. */
/* Write a bitmap as: u64 size-in-bits, then the bitmap as u64 words. */
138 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
140 u64 *p = (u64 *) set;
143 ret = do_write(ff, &size, sizeof(size));
147 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
148 ret = do_write(ff, p + i, sizeof(*p));
156 /* Return: 0 if succeeded, -ERR if failed. */
/* Write 'count' bytes of bf, then zero-pad up to 'count_aligned'. */
157 int write_padded(struct feat_fd *ff, const void *bf,
158 size_t count, size_t count_aligned)
160 static const char zero_buf[NAME_ALIGN];
161 int err = do_write(ff, bf, count);
164 err = do_write(ff, zero_buf, count_aligned - count);
/* On-file size of a string entry: u32 length + NUL-padded, aligned payload. */
169 #define string_size(str) \
170 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
172 /* Return: 0 if succeeded, -ERR if failed. */
/* Write a string as: u32 padded length, then the NUL-terminated, padded bytes. */
173 static int do_write_string(struct feat_fd *ff, const char *str)
178 olen = strlen(str) + 1;
179 len = PERF_ALIGN(olen, NAME_ALIGN);
181 /* write len, incl. \0 */
182 ret = do_write(ff, &len, sizeof(len));
186 return write_padded(ff, str, olen, len);
/* Read 'size' bytes from the fd; -errno on error, -1 on short read. */
189 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
191 ssize_t ret = readn(ff->fd, addr, size);
194 return ret < 0 ? (int)ret : -1;
/* Read 'size' bytes from the in-memory buffer, bounds-checked against ff->size. */
198 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
200 if (size > (ssize_t)ff->size - ff->offset)
203 memcpy(addr, ff->buf + ff->offset, size);
/* Dispatch to fd-backed or buffer-backed read depending on feat_fd mode. */
210 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
213 return __do_read_fd(ff, addr, size);
214 return __do_read_buf(ff, addr, size);
/* Read a u32, byte-swapping if the file's endianness differs from the host's. */
217 static int do_read_u32(struct feat_fd *ff, u32 *addr)
221 ret = __do_read(ff, addr, sizeof(*addr));
225 if (ff->ph->needs_swap)
226 *addr = bswap_32(*addr);
/* Read a u64, byte-swapping if the file's endianness differs from the host's. */
230 static int do_read_u64(struct feat_fd *ff, u64 *addr)
234 ret = __do_read(ff, addr, sizeof(*addr));
238 if (ff->ph->needs_swap)
239 *addr = bswap_64(*addr);
/*
 * Read a string written by do_write_string(): u32 padded length, then the
 * padded bytes. Returns a freshly allocated string, or NULL on error.
 */
243 static char *do_read_string(struct feat_fd *ff)
248 if (do_read_u32(ff, &len))
/* __do_read() returns 0 on success, so !__do_read means the read worked. */
255 if (!__do_read(ff, buf, len)) {
257 * strings are padded by zeroes
258 * thus the actual strlen of buf
259 * may be less than len
268 /* Return: 0 if succeeded, -ERR if failed. */
/* Read a bitmap written by do_write_bitmap(): u64 size, then u64 words. */
269 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
275 ret = do_read_u64(ff, &size);
279 set = bitmap_alloc(size);
285 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
286 ret = do_read_u64(ff, p + i);
/*
 * HEADER_TRACING_DATA writer: dumps tracepoint metadata for the evlist.
 * Requires a real fd; not supported when writing to the pipe-mode buffer.
 */
298 static int write_tracing_data(struct feat_fd *ff,
299 struct perf_evlist *evlist)
301 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
304 return read_tracing_data(ff->fd, &evlist->entries);
/*
 * HEADER_BUILD_ID writer: collects build-ids for the session's DSOs,
 * writes the build-id table, and caches the binaries in ~/.debug.
 * Requires a real fd; not supported in pipe mode.
 */
307 static int write_build_id(struct feat_fd *ff,
308 struct perf_evlist *evlist __maybe_unused)
310 struct perf_session *session;
313 session = container_of(ff->ph, struct perf_session, header);
315 if (!perf_session__read_build_ids(session, true))
318 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
321 err = perf_session__write_buildid_table(session, ff);
323 pr_debug("failed to write buildid table\n");
/* Best-effort: cache binaries by build-id for later symbol resolution. */
326 perf_session__cache_build_ids(session);
/* HEADER_HOSTNAME writer: uname() nodename as a string feature. */
331 static int write_hostname(struct feat_fd *ff,
332 struct perf_evlist *evlist __maybe_unused)
341 return do_write_string(ff, uts.nodename);
/* HEADER_OSRELEASE writer: uname() release as a string feature. */
344 static int write_osrelease(struct feat_fd *ff,
345 struct perf_evlist *evlist __maybe_unused)
354 return do_write_string(ff, uts.release);
/* HEADER_ARCH writer: uname() machine as a string feature. */
357 static int write_arch(struct feat_fd *ff,
358 struct perf_evlist *evlist __maybe_unused)
367 return do_write_string(ff, uts.machine);
/* HEADER_VERSION writer: the perf build's version string. */
370 static int write_version(struct feat_fd *ff,
371 struct perf_evlist *evlist __maybe_unused)
373 return do_write_string(ff, perf_version_string);
/*
 * Scan /proc/cpuinfo for the line whose key matches 'cpuinfo_proc'
 * (arch-specific, e.g. "model name"), extract the value after ':',
 * collapse repeated whitespace, and write it as a string feature.
 */
376 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
381 const char *search = cpuinfo_proc;
388 file = fopen("/proc/cpuinfo", "r");
392 while (getline(&buf, &len, file) > 0) {
393 ret = strncmp(buf, search, strlen(search));
/* Value starts after ": "; require a non-empty value. */
405 p = strchr(buf, ':');
406 if (p && *(p+1) == ' ' && *(p+2))
412 /* squash extra space characters (branding string) */
419 while (*q && isspace(*q))
/* In-place copy, including the terminating NUL. */
422 while ((*r++ = *q++));
426 ret = do_write_string(ff, s);
/*
 * HEADER_CPUDESC writer: try each arch-specific /proc/cpuinfo key in
 * CPUINFO_PROC until one succeeds.
 */
433 static int write_cpudesc(struct feat_fd *ff,
434 struct perf_evlist *evlist __maybe_unused)
436 const char *cpuinfo_procs[] = CPUINFO_PROC;
439 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
441 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
/*
 * HEADER_NRCPUS writer: two u32s — number of present CPUs, then the
 * number of CPUs currently online (sysconf).
 */
449 static int write_nrcpus(struct feat_fd *ff,
450 struct perf_evlist *evlist __maybe_unused)
456 nrc = cpu__max_present_cpu();
458 nr = sysconf(_SC_NPROCESSORS_ONLN);
462 nra = (u32)(nr & UINT_MAX);
464 ret = do_write(ff, &nrc, sizeof(nrc));
468 return do_write(ff, &nra, sizeof(nra));
/*
 * HEADER_EVENT_DESC writer. Layout per the on-file format:
 *   u32 nr_events, u32 attr_size, then per event:
 *   perf_event_attr, u32 nr_ids, name string, u64 ids[nr_ids].
 */
471 static int write_event_desc(struct feat_fd *ff,
472 struct perf_evlist *evlist)
474 struct perf_evsel *evsel;
478 nre = evlist->nr_entries;
481 * write number of events
483 ret = do_write(ff, &nre, sizeof(nre));
488 * size of perf_event_attr struct
490 sz = (u32)sizeof(evsel->attr);
491 ret = do_write(ff, &sz, sizeof(sz));
495 evlist__for_each_entry(evlist, evsel) {
496 ret = do_write(ff, &evsel->attr, sz);
500 * write number of unique id per event
501 * there is one id per instance of an event
503 * copy into an nri to be independent of the
507 ret = do_write(ff, &nri, sizeof(nri));
512 * write event string as passed on cmdline
514 ret = do_write_string(ff, perf_evsel__name(evsel));
518 * write unique ids for this event
520 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
/*
 * HEADER_CMDLINE writer: u32 argc (plus one for the binary path), then
 * the resolved /proc/self/exe path and each recorded cmdline argument
 * as string features.
 */
527 static int write_cmdline(struct feat_fd *ff,
528 struct perf_evlist *evlist __maybe_unused)
530 char buf[MAXPATHLEN];
534 /* actual path to perf binary */
535 ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
539 /* readlink() does not add null termination */
542 /* account for binary path */
543 n = perf_env.nr_cmdline + 1;
545 ret = do_write(ff, &n, sizeof(n));
549 ret = do_write_string(ff, buf);
553 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
554 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
/*
 * HEADER_CPU_TOPOLOGY writer: core-sibling and thread-sibling string
 * lists from sysfs, followed by a per-CPU (core_id, socket_id) pair.
 */
562 static int write_cpu_topology(struct feat_fd *ff,
563 struct perf_evlist *evlist __maybe_unused)
565 struct cpu_topology *tp;
569 tp = cpu_topology__new();
573 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
577 for (i = 0; i < tp->core_sib; i++) {
578 ret = do_write_string(ff, tp->core_siblings[i]);
582 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
586 for (i = 0; i < tp->thread_sib; i++) {
587 ret = do_write_string(ff, tp->thread_siblings[i]);
/* Section 2 of the feature: core/socket id per available CPU. */
592 ret = perf_env__read_cpu_topology_map(&perf_env);
596 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
597 ret = do_write(ff, &perf_env.cpu[j].core_id,
598 sizeof(perf_env.cpu[j].core_id));
601 ret = do_write(ff, &perf_env.cpu[j].socket_id,
602 sizeof(perf_env.cpu[j].socket_id));
607 cpu_topology__delete(tp);
/*
 * HEADER_TOTAL_MEM writer: parse the "MemTotal:" line of /proc/meminfo
 * and write the value (in kB) as a u64.
 */
613 static int write_total_mem(struct feat_fd *ff,
614 struct perf_evlist *evlist __maybe_unused)
622 fp = fopen("/proc/meminfo", "r");
626 while (getline(&buf, &len, fp) > 0) {
627 ret = strncmp(buf, "MemTotal:", 9);
632 n = sscanf(buf, "%*s %"PRIu64, &mem);
634 ret = do_write(ff, &mem, sizeof(mem));
/*
 * Write one NUMA node record: u64 MemTotal, u64 MemFree (both from the
 * node's sysfs meminfo), then its cpulist as a string (trailing newline
 * stripped).
 */
642 static int write_topo_node(struct feat_fd *ff, int node)
644 char str[MAXPATHLEN];
646 char *buf = NULL, *p;
649 u64 mem_total, mem_free, mem;
652 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
653 fp = fopen(str, "r");
657 while (getline(&buf, &len, fp) > 0) {
658 /* skip over invalid lines */
659 if (!strchr(buf, ':'))
661 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
663 if (!strcmp(field, "MemTotal:"))
665 if (!strcmp(field, "MemFree:"))
672 ret = do_write(ff, &mem_total, sizeof(u64));
676 ret = do_write(ff, &mem_free, sizeof(u64));
681 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
683 fp = fopen(str, "r");
687 if (getline(&buf, &len, fp) <= 0)
/* Strip the trailing newline before writing the cpulist string. */
690 p = strchr(buf, '\n');
694 ret = do_write_string(ff, buf);
/*
 * HEADER_NUMA_TOPOLOGY writer: u32 node count, then for each online
 * node its u32 id followed by the per-node record from write_topo_node().
 */
702 static int write_numa_topology(struct feat_fd *ff,
703 struct perf_evlist *evlist __maybe_unused)
708 struct cpu_map *node_map = NULL;
713 fp = fopen("/sys/devices/system/node/online", "r");
717 if (getline(&buf, &len, fp) <= 0)
720 c = strchr(buf, '\n');
/* Parse the online-node list (e.g. "0-1") into a cpu_map of node ids. */
724 node_map = cpu_map__new(buf);
728 nr = (u32)node_map->nr;
730 ret = do_write(ff, &nr, sizeof(nr));
734 for (i = 0; i < nr; i++) {
735 j = (u32)node_map->map[i];
736 ret = do_write(ff, &j, sizeof(j));
740 ret = write_topo_node(ff, j);
747 cpu_map__put(node_map);
/*
 * File format:
 * struct pmu_mappings {
 *	u32 pmu_num; then per PMU: u32 type, name string
 * };
 */
763 static int write_pmu_mappings(struct feat_fd *ff,
764 struct perf_evlist *evlist __maybe_unused)
766 struct perf_pmu *pmu = NULL;
771 * Do a first pass to count number of pmu to avoid lseek so this
772 * works in pipe mode as well.
774 while ((pmu = perf_pmu__scan(pmu))) {
780 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
/* Second pass: emit each PMU's type and name. */
784 while ((pmu = perf_pmu__scan(pmu))) {
788 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
792 ret = do_write_string(ff, pmu->name);
/*
 * File format:
 * struct group_descs {
 *	u32 nr_groups; then per group:
 * struct group_desc {
 *	name string, u32 leader_idx, u32 nr_members
 * };
 */
812 static int write_group_desc(struct feat_fd *ff,
813 struct perf_evlist *evlist)
815 u32 nr_groups = evlist->nr_groups;
816 struct perf_evsel *evsel;
819 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
/* Only real groups (leader with more than one member) are described. */
823 evlist__for_each_entry(evlist, evsel) {
824 if (perf_evsel__is_group_leader(evsel) &&
825 evsel->nr_members > 1) {
826 const char *name = evsel->group_name ?: "{anon_group}";
827 u32 leader_idx = evsel->idx;
828 u32 nr_members = evsel->nr_members;
830 ret = do_write_string(ff, name);
834 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
838 ret = do_write(ff, &nr_members, sizeof(nr_members));
/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
852 char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
857 /* Return zero when the cpuid from the mapfile.csv matches the
858 * cpuid string generated on this platform.
859 * Otherwise return non-zero.
/* Default (weak) implementation treats mapcpuid as an extended regex. */
861 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
864 regmatch_t pmatch[1];
867 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
868 /* Warn unable to generate match particular string. */
869 pr_info("Invalid regular expression %s\n", mapcpuid);
873 match = !regexec(&re, cpuid, 1, pmatch, 0);
876 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
878 /* Verify the entire string matched. */
879 if (match_len == strlen(cpuid))
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
889 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
/* HEADER_CPUID writer: arch-specific cpuid string, if available. */
894 static int write_cpuid(struct feat_fd *ff,
895 struct perf_evlist *evlist __maybe_unused)
900 ret = get_cpuid(buffer, sizeof(buffer));
904 return do_write_string(ff, buffer);
/* HEADER_BRANCH_STACK is a flag-only feature: presence is the payload. */
907 static int write_branch_stack(struct feat_fd *ff __maybe_unused,
908 struct perf_evlist *evlist __maybe_unused)
/*
 * HEADER_AUXTRACE writer: dump the session's auxtrace index.
 * Requires a real fd; not supported in pipe mode.
 */
913 static int write_auxtrace(struct feat_fd *ff,
914 struct perf_evlist *evlist __maybe_unused)
916 struct perf_session *session;
919 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
922 session = container_of(ff->ph, struct perf_session, header);
924 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
926 pr_err("Failed to write auxtrace index\n");
/* HEADER_CLOCKID writer: the recorded clock's resolution in nanoseconds. */
930 static int write_clockid(struct feat_fd *ff,
931 struct perf_evlist *evlist __maybe_unused)
933 return do_write(ff, &ff->ph->env.clockid_res_ns,
934 sizeof(ff->ph->env.clockid_res_ns));
/* qsort comparator: order cache entries by ascending level. */
937 static int cpu_cache_level__sort(const void *a, const void *b)
939 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
940 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
942 return cache_a->level - cache_b->level;
/* Field-by-field equality check used to deduplicate caches shared by CPUs. */
945 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
947 if (a->level != b->level)
950 if (a->line_size != b->line_size)
953 if (a->sets != b->sets)
956 if (a->ways != b->ways)
959 if (strcmp(a->type, b->type))
962 if (strcmp(a->size, b->size))
965 if (strcmp(a->map, b->map))
/*
 * Populate 'cache' from sysfs cpu%d/cache/index%d: level, line size,
 * sets, ways, plus the type/size/shared_cpu_list strings (trimmed).
 * The string fields are allocated by sysfs__read_str(); the caller owns
 * them (freed via cpu_cache_level__free()).
 */
971 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
973 char path[PATH_MAX], file[PATH_MAX];
977 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
978 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
983 scnprintf(file, PATH_MAX, "%s/level", path);
984 if (sysfs__read_int(file, (int *) &cache->level))
987 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
988 if (sysfs__read_int(file, (int *) &cache->line_size))
991 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
992 if (sysfs__read_int(file, (int *) &cache->sets))
995 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
996 if (sysfs__read_int(file, (int *) &cache->ways))
999 scnprintf(file, PATH_MAX, "%s/type", path)?
1000 if (sysfs__read_str(file, &cache->type, &len))
/* Terminate and strip trailing whitespace from each sysfs string. */
1003 cache->type[len] = 0;
1004 cache->type = rtrim(cache->type);
1006 scnprintf(file, PATH_MAX, "%s/size", path);
1007 if (sysfs__read_str(file, &cache->size, &len)) {
1012 cache->size[len] = 0;
1013 cache->size = rtrim(cache->size);
1015 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1016 if (sysfs__read_str(file, &cache->map, &len)) {
1022 cache->map[len] = 0;
1023 cache->map = rtrim(cache->map);
/* Print one cache entry: level, type, size and the shared-CPU map. */
1027 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1029 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
/*
 * Enumerate every (cpu, cache-level) pair from sysfs and collect the
 * distinct cache descriptions into 'caches' (at most 'size' entries),
 * returning the count via *cntp. Duplicates (caches shared between
 * CPUs) are freed rather than stored.
 */
1032 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
1039 ncpus = sysconf(_SC_NPROCESSORS_CONF);
1043 nr = (u32)(ncpus & UINT_MAX);
1045 for (cpu = 0; cpu < nr; cpu++) {
/* sysfs exposes at most a handful of cache index dirs; 10 is a safe bound. */
1046 for (level = 0; level < 10; level++) {
1047 struct cpu_cache_level c;
1050 err = cpu_cache_level__read(&c, cpu, level);
1057 for (i = 0; i < cnt; i++) {
1058 if (cpu_cache_level__cmp(&c, &caches[i]))
1065 cpu_cache_level__free(&c);
1067 if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
/* Upper bound on distinct cache descriptions collected from sysfs. */
1076 #define MAX_CACHES 2000
/*
 * HEADER_CACHE writer: u32 version, u32 count, then per cache the
 * numeric fields and the type/size/map strings (emitted via the local
 * _W/_W_STR-style macros visible at lines 1103/1114).
 */
1078 static int write_cache(struct feat_fd *ff,
1079 struct perf_evlist *evlist __maybe_unused)
1081 struct cpu_cache_level caches[MAX_CACHES];
1082 u32 cnt = 0, i, version = 1;
1085 ret = build_caches(caches, MAX_CACHES, &cnt);
1089 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1091 ret = do_write(ff, &version, sizeof(u32));
1095 ret = do_write(ff, &cnt, sizeof(u32));
1099 for (i = 0; i < cnt; i++) {
1100 struct cpu_cache_level *c = &caches[i];
1103 ret = do_write(ff, &c->v, sizeof(u32)); \
1114 ret = do_write_string(ff, (const char *) c->v); \
/* Free the sysfs-allocated strings collected by build_caches(). */
1125 for (i = 0; i < cnt; i++)
1126 cpu_cache_level__free(&caches[i]);
/* HEADER_STAT is a flag-only feature: presence marks a stat data file. */
1130 static int write_stat(struct feat_fd *ff __maybe_unused,
1131 struct perf_evlist *evlist __maybe_unused)
/* HEADER_SAMPLE_TIME writer: first and last sample timestamps (u64 each). */
1136 static int write_sample_time(struct feat_fd *ff,
1137 struct perf_evlist *evlist)
1141 ret = do_write(ff, &evlist->first_sample_time,
1142 sizeof(evlist->first_sample_time));
1146 return do_write(ff, &evlist->last_sample_time,
1147 sizeof(evlist->last_sample_time));
/*
 * Fill 'n' for NUMA node 'idx': scan the node's sysfs directory for
 * memoryNN entries, size a bitmap to the largest index seen, then set
 * a bit for each present memory block.
 * NOTE(review): pr_warning string contains "cant'" — a typo in a runtime
 * string, left untouched here (doc-only change).
 */
1151 static int memory_node__read(struct memory_node *n, unsigned long idx)
1153 unsigned int phys, size = 0;
1154 char path[PATH_MAX];
/* Iterate sysfs dirents that look like "memory%u", binding %u to 'mem'. */
1158 #define for_each_memory(mem, dir) \
1159 while ((ent = readdir(dir))) \
1160 if (strcmp(ent->d_name, ".") && \
1161 strcmp(ent->d_name, "..") && \
1162 sscanf(ent->d_name, "memory%u", &mem) == 1)
1164 scnprintf(path, PATH_MAX,
1165 "%s/devices/system/node/node%lu",
1166 sysfs__mountpoint(), idx);
1168 dir = opendir(path);
1170 pr_warning("failed: cant' open memory sysfs data\n");
/* First pass: find the highest memory-block index to size the bitmap. */
1174 for_each_memory(phys, dir) {
1175 size = max(phys, size);
1180 n->set = bitmap_alloc(size);
/* Second pass: mark each present memory block in the bitmap. */
1191 for_each_memory(phys, dir) {
1192 set_bit(phys, n->set);
/* qsort comparator: order memory nodes by ascending node id. */
1199 static int memory_node__sort(const void *a, const void *b)
1201 const struct memory_node *na = a;
1202 const struct memory_node *nb = b;
1204 return na->node - nb->node;
/*
 * Scan /sys/devices/system/node/ for "node%u" entries, read each node's
 * memory map into 'nodes' (at most 'size'), sort by node id, and return
 * the count via *cntp.
 */
1207 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1209 char path[PATH_MAX];
1215 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1216 sysfs__mountpoint());
1218 dir = opendir(path);
1220 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1225 while (!ret && (ent = readdir(dir))) {
1229 if (!strcmp(ent->d_name, ".") ||
1230 !strcmp(ent->d_name, ".."))
1233 r = sscanf(ent->d_name, "node%u", &idx);
1237 if (WARN_ONCE(cnt >= size,
1238 "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1241 ret = memory_node__read(&nodes[cnt++], idx);
1248 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
/* Upper bound on NUMA nodes stored in the static 'nodes' array below. */
1253 #define MAX_MEMORY_NODES 2000
/*
 * The MEM_TOPOLOGY holds physical memory map for every
 * node in system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store map of physical indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belongs to node
 */
1270 static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1271 struct perf_evlist *evlist __maybe_unused)
1273 static struct memory_node nodes[MAX_MEMORY_NODES];
1274 u64 bsize, version = 1, i, nr;
1277 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1278 (unsigned long long *) &bsize);
1282 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1286 ret = do_write(ff, &version, sizeof(version));
1290 ret = do_write(ff, &bsize, sizeof(bsize));
1294 ret = do_write(ff, &nr, sizeof(nr));
1298 for (i = 0; i < nr; i++) {
1299 struct memory_node *n = &nodes[i];
1302 ret = do_write(ff, &n->v, sizeof(n->v)); \
1311 ret = do_write_bitmap(ff, n->set, n->size);
/* Simple one-line printers for the string/scalar header features. */
1320 static void print_hostname(struct feat_fd *ff, FILE *fp)
1322 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1325 static void print_osrelease(struct feat_fd *ff, FILE *fp)
1327 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1330 static void print_arch(struct feat_fd *ff, FILE *fp)
1332 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1335 static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1337 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1340 static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1342 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1343 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1346 static void print_version(struct feat_fd *ff, FILE *fp)
1348 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
/*
 * Print the recorded command line, escaping single quotes in each
 * argument so the output can be pasted back into a shell.
 */
1351 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1355 nr = ff->ph->env.nr_cmdline;
1357 fprintf(fp, "# cmdline : ");
1359 for (i = 0; i < nr; i++) {
/* Work on a copy; fall back to printing the raw arg if strdup fails. */
1360 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1362 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1366 char *quote = strchr(argv_i, '\'');
1370 fprintf(fp, "%s\\\'", argv_i);
1373 fprintf(fp, "%s ", argv_i);
/*
 * Print sibling-core and sibling-thread lists (stored as consecutive
 * NUL-separated strings) and, when available, the per-CPU core/socket map.
 */
1380 static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1382 struct perf_header *ph = ff->ph;
1383 int cpu_nr = ph->env.nr_cpus_avail;
1387 nr = ph->env.nr_sibling_cores;
1388 str = ph->env.sibling_cores;
1390 for (i = 0; i < nr; i++) {
1391 fprintf(fp, "# sibling cores : %s\n", str);
/* Advance past this NUL-terminated entry to the next packed string. */
1392 str += strlen(str) + 1;
1395 nr = ph->env.nr_sibling_threads;
1396 str = ph->env.sibling_threads;
1398 for (i = 0; i < nr; i++) {
1399 fprintf(fp, "# sibling threads : %s\n", str);
1400 str += strlen(str) + 1;
1403 if (ph->env.cpu != NULL) {
1404 for (i = 0; i < cpu_nr; i++)
1405 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1406 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1408 fprintf(fp, "# Core ID and Socket ID information is not available\n");
/*
 * Print the recorded clock resolution.
 * NOTE(review): the label says "frequency ... MHz" but the value printed is
 * clockid_res_ns * 1000 (i.e. the resolution in picoseconds, not a
 * frequency) — confirm intended units against the write_clockid() producer.
 */
1411 static void print_clockid(struct feat_fd *ff, FILE *fp)
1413 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1414 ff->ph->env.clockid_res_ns * 1000);
/*
 * Free an array returned by read_event_desc(). The array is terminated
 * by a sentinel entry whose attr.size is zero.
 */
1417 static void free_event_desc(struct perf_evsel *events)
1419 struct perf_evsel *evsel;
1424 for (evsel = events; evsel->attr.size; evsel++) {
1425 zfree(&evsel->name);
/*
 * Parse the HEADER_EVENT_DESC section into a heap-allocated array of
 * perf_evsel, terminated by an entry with attr.size == 0. The on-file
 * attr size may differ from the in-core one; only min-size bytes are
 * copied. Returns NULL on any read/allocation failure (after freeing
 * partial results). Caller frees via free_event_desc().
 */
1432 static struct perf_evsel *read_event_desc(struct feat_fd *ff)
1434 struct perf_evsel *evsel, *events = NULL;
1437 u32 nre, sz, nr, i, j;
1440 /* number of events */
1441 if (do_read_u32(ff, &nre))
1444 if (do_read_u32(ff, &sz))
1447 /* buffer to hold on file attr struct */
1452 /* the last event terminates with evsel->attr.size == 0: */
1453 events = calloc(nre + 1, sizeof(*events));
1457 msz = sizeof(evsel->attr);
1461 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1465 * must read entire on-file attr struct to
1466 * sync up with layout.
1468 if (__do_read(ff, buf, sz))
1471 if (ff->ph->needs_swap)
1472 perf_event__attr_swap(buf);
1474 memcpy(&evsel->attr, buf, msz);
1476 if (do_read_u32(ff, &nr))
1479 if (ff->ph->needs_swap)
1480 evsel->needs_swap = true;
1482 evsel->name = do_read_string(ff);
/* Read this event's sample ids (nr u64 values). */
1489 id = calloc(nr, sizeof(*id));
1495 for (j = 0 ; j < nr; j++) {
1496 if (do_read_u64(ff, id))
/* Error path: release everything parsed so far. */
1505 free_event_desc(events);
/* Callback for perf_event_attr__fprintf(): ", name = value" pairs. */
1510 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1511 void *priv __maybe_unused)
1513 return fprintf(fp, ", %s = %s", name, val);
/*
 * Print each event's name, ids and attr fields. In pipe mode the events
 * were stashed on ff->events by process_event_desc() (the section cannot
 * be re-read); otherwise they are parsed here and freed at the end.
 */
1516 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1518 struct perf_evsel *evsel, *events;
1523 events = ff->events;
1525 events = read_event_desc(ff);
1528 fprintf(fp, "# event desc: not available or unable to read\n");
1532 for (evsel = events; evsel->attr.size; evsel++) {
1533 fprintf(fp, "# event : name = %s, ", evsel->name);
1536 fprintf(fp, ", id = {");
1537 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1540 fprintf(fp, " %"PRIu64, *id);
1545 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1550 free_event_desc(events);
/* Print total system memory recorded in the header (kB). */
1554 static void print_total_mem(struct feat_fd *ff, FILE *fp)
1556 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
/* Print each NUMA node's memory totals and its CPU list. */
1559 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1562 struct numa_node *n;
1564 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1565 n = &ff->ph->env.numa_nodes[i];
1567 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1568 " free = %"PRIu64" kB\n",
1569 n->node, n->mem_total, n->mem_free);
1571 fprintf(fp, "# node%u cpu list : ", n->node);
1572 cpu_map__fprintf(n->map, fp);
/* One-line printers for cpuid and the flag-only features, plus caches. */
1576 static void print_cpuid(struct feat_fd *ff, FILE *fp)
1578 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1581 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1583 fprintf(fp, "# contains samples with branch stack\n");
1586 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1588 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1591 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1593 fprintf(fp, "# contains stat data\n");
/* Print every cache entry parsed from HEADER_CACHE. */
1596 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1600 fprintf(fp, "# CPU cache info:\n");
1601 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1603 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
/*
 * Print "name = type" for each PMU mapping. The mappings are stored as
 * a packed sequence of "type name\0" records in env.pmu_mappings.
 */
1607 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
1609 const char *delimiter = "# pmu mappings: ";
1614 pmu_num = ff->ph->env.nr_pmu_mappings;
1616 fprintf(fp, "# pmu mappings: not available\n");
1620 str = ff->ph->env.pmu_mappings;
1623 type = strtoul(str, &tmp, 0);
1628 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
/* Advance to the next packed "type name" record. */
1631 str += strlen(str) + 1;
1640 fprintf(fp, "# pmu mappings: unable to read\n");
/*
 * Print each event group as "# group: name{leader,member,...}", walking
 * the session's evlist for leaders with more than one member.
 */
1643 static void print_group_desc(struct feat_fd *ff, FILE *fp)
1645 struct perf_session *session;
1646 struct perf_evsel *evsel;
1649 session = container_of(ff->ph, struct perf_session, header);
1651 evlist__for_each_entry(session->evlist, evsel) {
1652 if (perf_evsel__is_group_leader(evsel) &&
1653 evsel->nr_members > 1) {
1654 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1655 perf_evsel__name(evsel));
1657 nr = evsel->nr_members - 1;
1659 fprintf(fp, ",%s", perf_evsel__name(evsel));
/*
 * Print first/last sample timestamps (usec precision) and the sample
 * window duration in milliseconds.
 */
1667 static void print_sample_time(struct feat_fd *ff, FILE *fp)
1669 struct perf_session *session;
1673 session = container_of(ff->ph, struct perf_session, header);
1675 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1676 time_buf, sizeof(time_buf));
1677 fprintf(fp, "# time of first sample : %s\n", time_buf);
1679 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1680 time_buf, sizeof(time_buf));
1681 fprintf(fp, "# time of last sample : %s\n", time_buf);
1683 d = (double)(session->evlist->last_sample_time -
1684 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1686 fprintf(fp, "# sample duration : %10.3f ms\n", d);
/*
 * Print one memory node: total size (block count * block size, human
 * readable) and the bitmap of present memory-block indexes.
 */
1689 static void memory_node__fprintf(struct memory_node *n,
1690 unsigned long long bsize, FILE *fp)
1692 char buf_map[100], buf_size[50];
1693 unsigned long long size;
1695 size = bsize * bitmap_weight(n->set, n->size);
1696 unit_number__scnprintf(buf_size, 50, size);
1698 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1699 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
/* Print every memory node parsed from HEADER_MEM_TOPOLOGY. */
1702 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1704 struct memory_node *nodes;
1707 nodes = ff->ph->env.memory_nodes;
1708 nr = ff->ph->env.nr_memory_nodes;
1710 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1711 nr, ff->ph->env.memory_bsize);
1713 for (i = 0; i < nr; i++) {
1714 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
/*
 * Apply one build-id table entry: find (or create) the machine for the
 * event's pid, classify the DSO kind from header.misc, and attach the
 * build-id to the DSO named 'filename' (argument elided in this excerpt).
 */
1718 static int __event_process_build_id(struct build_id_event *bev,
1720 struct perf_session *session)
1723 struct machine *machine;
1726 enum dso_kernel_type dso_type;
1728 machine = perf_session__findnew_machine(session, bev->pid);
1732 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1735 case PERF_RECORD_MISC_KERNEL:
1736 dso_type = DSO_TYPE_KERNEL;
1738 case PERF_RECORD_MISC_GUEST_KERNEL:
1739 dso_type = DSO_TYPE_GUEST_KERNEL;
1741 case PERF_RECORD_MISC_USER:
1742 case PERF_RECORD_MISC_GUEST_USER:
1743 dso_type = DSO_TYPE_USER;
1749 dso = machine__findnew_dso(machine, filename);
1751 char sbuild_id[SBUILD_ID_SIZE];
1753 dso__set_build_id(dso, &bev->build_id);
/* Kernel/module DSOs also get module info and their kernel type set. */
1755 if (dso_type != DSO_TYPE_USER) {
1756 struct kmod_path m = { .name = NULL, };
1758 if (!kmod_path__parse_name(&m, filename) && m.kmod)
1759 dso__set_module_info(dso, &m, machine);
1761 dso->kernel = dso_type;
1766 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1768 pr_debug("build id event received for %s: %s\n",
1769 dso->long_name, sbuild_id);
/*
 * Read a build-id table written in the pre-a1645ce1 layout (no pid
 * field). Each old-format record is converted to the current
 * build_id_event (synthesizing the pid from header.misc) and processed.
 */
1778 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1779 int input, u64 offset, u64 size)
1781 struct perf_session *session = container_of(header, struct perf_session, header);
1783 struct perf_event_header header;
1784 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1787 struct build_id_event bev;
1788 char filename[PATH_MAX];
1789 u64 limit = offset + size;
1791 while (offset < limit) {
1794 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1797 if (header->needs_swap)
1798 perf_event_header__bswap(&old_bev.header);
1800 len = old_bev.header.size - sizeof(old_bev);
1801 if (readn(input, filename, len) != len)
1804 bev.header = old_bev.header;
1807 * As the pid is the missing value, we need to fill
1808 * it properly. The header.misc value gives us a nice hint.
1810 bev.pid = HOST_KERNEL_ID;
1811 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1812 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1813 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1815 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1816 __event_process_build_id(&bev, filename, session);
1818 offset += bev.header.size;
/*
 * Read the HEADER_BUILD_ID table at [offset, offset+size) and feed each
 * entry to __event_process_build_id(). Falls back to the old-ABI parser
 * if the legacy layout is detected (see comment below).
 */
1824 static int perf_header__read_build_ids(struct perf_header *header,
1825 int input, u64 offset, u64 size)
1827 struct perf_session *session = container_of(header, struct perf_session, header);
1828 struct build_id_event bev;
1829 char filename[PATH_MAX];
1830 u64 limit = offset + size, orig_offset = offset;
1833 while (offset < limit) {
1836 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1839 if (header->needs_swap)
1840 perf_event_header__bswap(&bev.header);
1842 len = bev.header.size - sizeof(bev);
1843 if (readn(input, filename, len) != len)
1846 * The a1645ce1 changeset:
1848 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1850 * Added a field to struct build_id_event that broke the file
1853 * Since the kernel build-id is the first entry, process the
1854 * table using the old format if the well known
1855 * '[kernel.kallsyms]' string for the kernel build-id has the
1856 * first 4 characters chopped off (where the pid_t sits).
1858 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1859 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1861 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1864 __event_process_build_id(&bev, filename, session);
1866 offset += bev.header.size;
1873 /* Macro for features that simply need to read and store a string. */
1874 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
1875 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
1877 ff->ph->env.__feat_env = do_read_string(ff); \
1878 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
/* Generate process_hostname() .. process_cpuid(), each filling one env field. */
1881 FEAT_PROCESS_STR_FUN(hostname, hostname);
1882 FEAT_PROCESS_STR_FUN(osrelease, os_release);
1883 FEAT_PROCESS_STR_FUN(version, version);
1884 FEAT_PROCESS_STR_FUN(arch, arch);
1885 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
1886 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
/* HEADER_TRACING_DATA reader: hand the section to the trace-event parser. */
1888 static int process_tracing_data(struct feat_fd *ff, void *data)
1890 ssize_t ret = trace_report(ff->fd, data, false);
1892 return ret < 0 ? -1 : 0;
/* HEADER_BUILD_ID reader: build-id failures are non-fatal (just logged). */
1895 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
1897 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
1898 pr_debug("Failed to read buildids, continuing...\n");
/* HEADER_NRCPUS: read the available/online CPU counts into the env. */
static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
        u32 nr_cpus_avail, nr_cpus_online;

        ret = do_read_u32(ff, &nr_cpus_avail);
        ret = do_read_u32(ff, &nr_cpus_online);
        ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
        ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
/* HEADER_TOTAL_MEM: read the recorded total memory into the env. */
static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
        ret = do_read_u64(ff, &total_mem);
        ff->ph->env.total_mem = (unsigned long long)total_mem;
/* Linear scan of @evlist for the evsel whose ->idx equals @idx. */
static struct perf_evsel *
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->idx == idx)
/*
 * Copy @event's name onto the evsel in @evlist that has the same ->idx.
 * The name is duplicated so @event's copy can be freed by the caller.
 */
perf_evlist__set_event_name(struct perf_evlist *evlist,
                            struct perf_evsel *event)
        struct perf_evsel *evsel;

        evsel = perf_evlist__find_by_index(evlist, event->idx);
        evsel->name = strdup(event->name);
/*
 * HEADER_EVENT_DESC: read the event descriptions and propagate the
 * event names onto the session's evlist.
 */
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
        struct perf_session *session;
        struct perf_evsel *evsel, *events = read_event_desc(ff);

        session = container_of(ff->ph, struct perf_session, header);

        if (session->data->is_pipe) {
                /* Save events for reading later by print_event_desc,
                 * since they can't be read again in pipe mode. */
                ff->events = events;

        /* The array appears to be terminated by attr.size == 0. */
        for (evsel = events; evsel->attr.size; evsel++)
                perf_evlist__set_event_name(session->evlist, evsel);

        /* In pipe mode the copy stays alive in ff->events instead. */
        if (!session->data->is_pipe)
                free_event_desc(events);
/*
 * HEADER_CMDLINE: rebuild the recorded command line as one flat buffer
 * (env.cmdline) plus an argv-style pointer array (env.cmdline_argv)
 * whose entries point into that buffer.
 */
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
        char *str, *cmdline = NULL, **argv = NULL;

        if (do_read_u32(ff, &nr))

        ff->ph->env.nr_cmdline = nr;

        /* Large enough for every string plus its NUL terminator. */
        cmdline = zalloc(ff->size + nr + 1);

        argv = zalloc(sizeof(char *) * (nr + 1));

        for (i = 0; i < nr; i++) {
                str = do_read_string(ff);

                argv[i] = cmdline + len;
                memcpy(argv[i], str, strlen(str) + 1);
                /* NOTE(review): str returned by do_read_string() appears
                 * to be leaked after this copy -- confirm and free(str). */
                len += strlen(str) + 1;

        ff->ph->env.cmdline = cmdline;
        ff->ph->env.cmdline_argv = (const char **) argv;
/*
 * HEADER_CPU_TOPOLOGY: read the sibling-core and sibling-thread lists
 * and, for newer files, the per-CPU core id / socket id table.
 */
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
        int cpu_nr = ff->ph->env.nr_cpus_avail;
        struct perf_header *ph = ff->ph;
        bool do_core_id_test = true;

        ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));

        /* Sibling core list: count, then one string per entry. */
        if (do_read_u32(ff, &nr))

        ph->env.nr_sibling_cores = nr;
        size += sizeof(u32);
        if (strbuf_init(&sb, 128) < 0)

        for (i = 0; i < nr; i++) {
                str = do_read_string(ff);

                /* include a NULL character at the end */
                if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
                size += string_size(str);

        ph->env.sibling_cores = strbuf_detach(&sb, NULL);

        /* Sibling thread list, same layout as the core list. */
        if (do_read_u32(ff, &nr))

        ph->env.nr_sibling_threads = nr;
        size += sizeof(u32);

        for (i = 0; i < nr; i++) {
                str = do_read_string(ff);

                /* include a NULL character at the end */
                if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
                size += string_size(str);

        ph->env.sibling_threads = strbuf_detach(&sb, NULL);

        /*
         * The header may be from old perf,
         * which doesn't include core id and socket id information.
         */
        if (ff->size <= size) {
                zfree(&ph->env.cpu);

        /* On s390 the socket_id number is not related to the numbers of cpus.
         * The socket_id number might be higher than the numbers of cpus.
         * This depends on the configuration.
         */
        if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
                do_core_id_test = false;

        /* Per-CPU pairs: core id followed by socket id. */
        for (i = 0; i < (u32)cpu_nr; i++) {
                if (do_read_u32(ff, &nr))

                ph->env.cpu[i].core_id = nr;

                if (do_read_u32(ff, &nr))

                /* (u32)-1 marks an unknown socket id and is accepted. */
                if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
                        pr_debug("socket_id number is too big."
                                 "You may need to upgrade the perf tool.\n");

                ph->env.cpu[i].socket_id = nr;

        strbuf_release(&sb);

        zfree(&ph->env.cpu);
/*
 * HEADER_NUMA_TOPOLOGY: read each NUMA node's id, memory counters and
 * textual CPU list into env.numa_nodes.
 */
static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
        struct numa_node *nodes, *n;

        if (do_read_u32(ff, &nr))

        nodes = zalloc(sizeof(*nodes) * nr);

        for (i = 0; i < nr; i++) {

                /* Node number, then its total and free memory counters. */
                if (do_read_u32(ff, &n->node))
                if (do_read_u64(ff, &n->mem_total))
                if (do_read_u64(ff, &n->mem_free))

                str = do_read_string(ff);

                /* Parse the CPU list string (e.g. "0-3,7") into a map. */
                n->map = cpu_map__new(str);

        ff->ph->env.nr_numa_nodes = nr;
        ff->ph->env.numa_nodes = nodes;
/*
 * HEADER_PMU_MAPPINGS: collect the "<type>:<name>" PMU pairs into one
 * NUL-separated string (env.pmu_mappings); remembers the msr PMU type.
 */
static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
        if (do_read_u32(ff, &pmu_num))

        pr_debug("pmu mappings not available\n");

        ff->ph->env.nr_pmu_mappings = pmu_num;
        if (strbuf_init(&sb, 128) < 0)

        if (do_read_u32(ff, &type))

        name = do_read_string(ff);

        if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
        /* include a NULL character at the end */
        if (strbuf_add(&sb, "", 1) < 0)

        if (!strcmp(name, "msr"))
                ff->ph->env.msr_pmu_type = type;
        /* NOTE(review): name from do_read_string() appears to be leaked
         * on every iteration -- confirm and free(name) if so. */

        ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);

        strbuf_release(&sb);
/*
 * HEADER_GROUP_DESC: read the group descriptors and rebuild the
 * leader/member relationships on the session's evlist.
 */
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
        u32 i, nr, nr_groups;
        struct perf_session *session;
        struct perf_evsel *evsel, *leader = NULL;

        if (do_read_u32(ff, &nr_groups))

        ff->ph->env.nr_groups = nr_groups;
        pr_debug("group desc not available\n");

        desc = calloc(nr_groups, sizeof(*desc));

        /* Per group: name, leader index, member count. */
        for (i = 0; i < nr_groups; i++) {
                desc[i].name = do_read_string(ff);

                if (do_read_u32(ff, &desc[i].leader_idx))
                if (do_read_u32(ff, &desc[i].nr_members))

        /*
         * Rebuild group relationship based on the group_desc
         */
        session = container_of(ff->ph, struct perf_session, header);
        session->evlist->nr_groups = nr_groups;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->idx == (int) desc[i].leader_idx) {
                        /* A leader points at itself. */
                        evsel->leader = evsel;
                        /* {anon_group} is a dummy name */
                        if (strcmp(desc[i].name, "{anon_group}")) {
                                /* Ownership of the name moves to the evsel. */
                                evsel->group_name = desc[i].name;
                                desc[i].name = NULL;
                        evsel->nr_members = desc[i].nr_members;

                        /* Descriptor/consistency sanity check. */
                        if (i >= nr_groups || nr > 0) {
                                pr_debug("invalid group desc\n");

                        /* nr tracks the members still expected. */
                        nr = evsel->nr_members - 1;
                /* This is a group member */
                evsel->leader = leader;

        /* Everything consumed: all groups seen, no member missing. */
        if (i != nr_groups || nr != 0) {
                pr_debug("invalid group desc\n");

        for (i = 0; i < nr_groups; i++)
                zfree(&desc[i].name);
/* HEADER_AUXTRACE: hand the index over to auxtrace_index__process(). */
static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
        struct perf_session *session;

        session = container_of(ff->ph, struct perf_session, header);

        err = auxtrace_index__process(ff->fd, ff->size, session,
                                      ff->ph->needs_swap);
        pr_err("Failed to process auxtrace index\n");
/* HEADER_CACHE: read the CPU cache level descriptions into the env. */
static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
        struct cpu_cache_level *caches;
        u32 cnt, i, version;

        if (do_read_u32(ff, &version))

        if (do_read_u32(ff, &cnt))

        caches = zalloc(sizeof(*caches) * cnt);

        for (i = 0; i < cnt; i++) {
                struct cpu_cache_level c;

                /* Numeric then string fields of cpu_cache_level, read
                 * via local helper macros (hence the continuations). */
                if (do_read_u32(ff, &c.v))\
                        goto out_free_caches; \
                c.v = do_read_string(ff); \
                        goto out_free_caches;

        ff->ph->env.caches = caches;
        ff->ph->env.caches_cnt = cnt;
/*
 * HEADER_SAMPLE_TIME: read the first/last sample timestamps into the
 * session's evlist.
 */
static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
        struct perf_session *session;
        u64 first_sample_time, last_sample_time;

        session = container_of(ff->ph, struct perf_session, header);

        ret = do_read_u64(ff, &first_sample_time);
        ret = do_read_u64(ff, &last_sample_time);

        session->evlist->first_sample_time = first_sample_time;
        session->evlist->last_sample_time = last_sample_time;
/*
 * HEADER_MEM_TOPOLOGY: read the memory node table (block size, then
 * per-node fields and a bitmap of present memory blocks) into the env.
 */
static int process_mem_topology(struct feat_fd *ff,
                                void *data __maybe_unused)
        struct memory_node *nodes;
        u64 version, i, nr, bsize;

        if (do_read_u64(ff, &version))

        if (do_read_u64(ff, &bsize))

        if (do_read_u64(ff, &nr))

        nodes = zalloc(sizeof(*nodes) * nr);

        for (i = 0; i < nr; i++) {
                struct memory_node n;

                /* Fixed u64 fields read via a local helper macro. */
                if (do_read_u64(ff, &n.v)) \
                if (do_read_bitmap(ff, &n.set, &n.size))

        ff->ph->env.memory_bsize = bsize;
        ff->ph->env.memory_nodes = nodes;
        ff->ph->env.nr_memory_nodes = nr;
/* HEADER_CLOCKID: read the clock resolution value into the env. */
static int process_clockid(struct feat_fd *ff,
                           void *data __maybe_unused)
        if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
/*
 * Per-feature vtable: write the section at record time, print it for
 * header display, and parse it back at read time.
 */
struct feature_ops {
        int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
        void (*print)(struct feat_fd *ff, FILE *fp);
        int (*process)(struct feat_fd *ff, void *data);
/*
 * Table-entry builders for feat_ops[]: FEAT_OPR additionally marks the
 * feature for synthesis as a pipe-mode event (.synthesize = true);
 * FEAT_OPN leaves .synthesize unset.
 */
#define FEAT_OPR(n, func, __full_only) \
        .name       = __stringify(n), \
        .write      = write_##func, \
        .print      = print_##func, \
        .full_only  = __full_only, \
        .process    = process_##func, \
        .synthesize = true \
#define FEAT_OPN(n, func, __full_only) \
        .name       = __stringify(n), \
        .write      = write_##func, \
        .print      = print_##func, \
        .full_only  = __full_only, \
        .process    = process_##func \
/* feature_ops not implemented: */
#define print_tracing_data NULL
#define print_build_id NULL
#define process_branch_stack NULL
#define process_stat NULL
/*
 * One entry per header feature, indexed by its HEADER_* id.  The third
 * argument (full_only) defers printing unless full header output was
 * requested (see perf_file_section__fprintf_info()).
 */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPN(TRACING_DATA, tracing_data, false),
        FEAT_OPN(BUILD_ID, build_id, false),
        FEAT_OPR(HOSTNAME, hostname, false),
        FEAT_OPR(OSRELEASE, osrelease, false),
        FEAT_OPR(VERSION, version, false),
        FEAT_OPR(ARCH, arch, false),
        FEAT_OPR(NRCPUS, nrcpus, false),
        FEAT_OPR(CPUDESC, cpudesc, false),
        FEAT_OPR(CPUID, cpuid, false),
        FEAT_OPR(TOTAL_MEM, total_mem, false),
        FEAT_OPR(EVENT_DESC, event_desc, false),
        FEAT_OPR(CMDLINE, cmdline, false),
        FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
        FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
        FEAT_OPN(BRANCH_STACK, branch_stack, false),
        FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
        FEAT_OPR(GROUP_DESC, group_desc, false),
        FEAT_OPN(AUXTRACE, auxtrace, false),
        FEAT_OPN(STAT, stat, false),
        FEAT_OPN(CACHE, cache, true),
        FEAT_OPR(SAMPLE_TIME, sample_time, false),
        FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
        FEAT_OPR(CLOCKID, clockid, false)
/* Cookie handed to perf_file_section__fprintf_info() via @data. */
struct header_print_data {
        bool full; /* extended list of headers */
/*
 * Section callback for perf_header__fprintf_info(): seek to the
 * feature's section and invoke its ->print handler.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
                                           struct perf_header *ph,
                                           int feat, int fd, void *data)
        struct header_print_data *hd = data;

        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
                         "%d, continuing...\n", section->offset, feat);

        if (feat >= HEADER_LAST_FEATURE) {
                pr_warning("unknown feature %d\n", feat);

        if (!feat_ops[feat].print)

        ff = (struct feat_fd) {

        /* full_only features print only when full output is requested. */
        if (!feat_ops[feat].full_only || hd->full)
                feat_ops[feat].print(&ff, hd->fp);
        fprintf(hd->fp, "# %s info available, use -I to display\n",
                feat_ops[feat].name);
/*
 * Print the "# ..." header block: capture time, header offsets/sizes,
 * every feature section via perf_file_section__fprintf_info(), and
 * finally the list of features missing from the file.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
        struct header_print_data hd;
        struct perf_header *header = &session->header;
        int fd = perf_data__fd(session->data);

        ret = fstat(fd, &st);

        stctime = st.st_ctime;
        fprintf(fp, "# captured on : %s", ctime(&stctime));

        fprintf(fp, "# header version : %u\n", header->version);
        fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
        fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
        fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);

        perf_header__process_sections(header, fd, &hd,
                                      perf_file_section__fprintf_info);

        /* Pipe data carries no feature-section table to report on. */
        if (session->data->is_pipe)

        fprintf(fp, "# missing features: ");
        for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
                fprintf(fp, "%s ", feat_ops[bit].name);
/*
 * Write one feature section (file mode): record the section offset,
 * run the feature's ->write handler, then compute the section size.
 * On handler failure the file offset is rewound so nothing partial is
 * left behind.
 */
static int do_write_feat(struct feat_fd *ff, int type,
                         struct perf_file_section **p,
                         struct perf_evlist *evlist)
        if (perf_header__has_feat(ff->ph, type)) {
                if (!feat_ops[type].write)

                /* Sections go to an fd; a buffer here means pipe mode. */
                if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))

                (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

                err = feat_ops[type].write(ff, evlist);
                pr_debug("failed to write feature %s\n", feat_ops[type].name);

                /* undo anything written */
                lseek(ff->fd, (*p)->offset, SEEK_SET);

                (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
/*
 * Write every enabled feature section, then the section table at
 * header->feat_offset.  Features whose write handler fails are simply
 * dropped from the feature bitmap.
 */
static int perf_header__adds_write(struct perf_header *header,
                                   struct perf_evlist *evlist, int fd)
        struct perf_file_section *feat_sec, *p;

        ff = (struct feat_fd){

        nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

        feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
        if (feat_sec == NULL)

        sec_size = sizeof(*feat_sec) * nr_sections;

        /* Leave room for the section table before the feature data. */
        sec_start = header->feat_offset;
        lseek(fd, sec_start + sec_size, SEEK_SET);

        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
                if (do_write_feat(&ff, feat, &p, evlist))
                        perf_header__clear_feat(header, feat);

        /* Now go back and emit the table itself. */
        lseek(fd, sec_start, SEEK_SET);
        /*
         * may write more than needed due to dropped feature, but
         * this is okay, reader will skip the missing entries
         */
        err = do_write(&ff, feat_sec, sec_size);
        pr_debug("failed to write feature section\n");
/* Emit the minimal (magic + size) header used for pipe-mode output. */
int perf_header__write_pipe(int fd)
        struct perf_pipe_file_header f_header;

        ff = (struct feat_fd){ .fd = fd };

        f_header = (struct perf_pipe_file_header){
                .magic = PERF_MAGIC,
                .size = sizeof(f_header),

        err = do_write(&ff, &f_header, sizeof(f_header));
        pr_debug("failed to write perf pipe header\n");
/*
 * Write the on-disk perf.data layout: per-evsel sample-id arrays, the
 * attr table referencing them, the feature sections, and finally the
 * perf_file_header itself back at offset 0.
 */
int perf_session__write_header(struct perf_session *session,
                               struct perf_evlist *evlist,
                               int fd, bool at_exit)
        struct perf_file_header f_header;
        struct perf_file_attr f_attr;
        struct perf_header *header = &session->header;
        struct perf_evsel *evsel;

        ff = (struct feat_fd){ .fd = fd};
        /* Skip the header slot; it is rewritten once sizes are known. */
        lseek(fd, sizeof(f_header), SEEK_SET);

        /* Sample-id arrays first, remembering where each one landed. */
        evlist__for_each_entry(session->evlist, evsel) {
                evsel->id_offset = lseek(fd, 0, SEEK_CUR);
                err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
                pr_debug("failed to write perf header\n");

        attr_offset = lseek(ff.fd, 0, SEEK_CUR);

        /* One perf_file_attr per evsel, pointing back at its id array. */
        evlist__for_each_entry(evlist, evsel) {
                f_attr = (struct perf_file_attr){
                        .attr = evsel->attr,
                                .offset = evsel->id_offset,
                                .size = evsel->ids * sizeof(u64),

                err = do_write(&ff, &f_attr, sizeof(f_attr));
                pr_debug("failed to write perf header attribute\n");

        if (!header->data_offset)
                header->data_offset = lseek(fd, 0, SEEK_CUR);
        header->feat_offset = header->data_offset + header->data_size;

        err = perf_header__adds_write(header, evlist, fd);

        f_header = (struct perf_file_header){
                .magic = PERF_MAGIC,
                .size = sizeof(f_header),
                .attr_size = sizeof(f_attr),
                        .offset = attr_offset,
                        .size = evlist->nr_entries * sizeof(f_attr),
                        .offset = header->data_offset,
                        .size = header->data_size,
        /* event_types is ignored, store zeros */

        memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

        /* Rewind and emit the now-complete file header. */
        lseek(fd, 0, SEEK_SET);
        err = do_write(&ff, &f_header, sizeof(f_header));
        pr_debug("failed to write perf header\n");

        lseek(fd, header->data_offset + header->data_size, SEEK_SET);
/*
 * Read @size bytes into @buf, byte-swapping as u64s when the file's
 * endianness differs from the host's.
 */
static int perf_header__getbuffer64(struct perf_header *header,
                                    int fd, void *buf, size_t size)
        if (readn(fd, buf, size) <= 0)

        if (header->needs_swap)
                mem_bswap_64(buf, size);
/*
 * Read the feature-section table at header->feat_offset and invoke
 * @process once per set feature bit with its matching section entry.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
                                  int (*process)(struct perf_file_section *section,
                                                 struct perf_header *ph,
                                                 int feat, int fd, void *data))
        struct perf_file_section *feat_sec, *sec;

        nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

        feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));

        sec_size = sizeof(*feat_sec) * nr_sections;

        lseek(fd, header->feat_offset, SEEK_SET);

        /* Table entries are 64-bit; swapping is handled by the helper. */
        err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);

        for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
                err = process(sec++, header, feat, fd, data);
/*
 * perf_event_attr size for each published ABI revision; iterated up to
 * a zero entry by try_all_file_abis() when probing legacy files.
 */
static const int attr_file_abi_sizes[] = {
        [0] = PERF_ATTR_SIZE_VER0,
        [1] = PERF_ATTR_SIZE_VER1,
        [2] = PERF_ATTR_SIZE_VER2,
        [3] = PERF_ATTR_SIZE_VER3,
        [4] = PERF_ATTR_SIZE_VER4,
/*
 * In the legacy file format, the magic number is not used to encode endianness.
 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
 * on ABI revisions, we need to try all combinations for all endianness to
 * detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
        uint64_t ref_size, attr_size;

        for (i = 0 ; attr_file_abi_sizes[i]; i++) {
                /* Expected size: attr for this ABI plus one file section. */
                ref_size = attr_file_abi_sizes[i]
                         + sizeof(struct perf_file_section);
                if (hdr_sz != ref_size) {
                        /* No native match; retry with swapped bytes. */
                        attr_size = bswap_64(hdr_sz);
                        if (attr_size != ref_size)

                        ph->needs_swap = true;

                pr_debug("ABI%d perf.data file detected, need_swap=%d\n",

        /* could not determine endianness */
/* Size of the original (version 0) pipe header. */
#define PERF_PIPE_HDR_VER0	16

/* Known pipe header sizes, probed like attr_file_abi_sizes[]. */
static const size_t attr_pipe_abi_sizes[] = {
        [0] = PERF_PIPE_HDR_VER0,
 * In the legacy pipe format, there is an implicit assumption that endianness
 * between host recording the samples, and host parsing the samples is the
 * same. This is not always the case given that the pipe output may always be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revisions in the perf tool itself.
2871 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2876 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2877 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2878 attr_size = bswap_64(hdr_sz);
2879 if (attr_size != hdr_sz)
2882 ph->needs_swap = true;
2884 pr_debug("Pipe ABI%d perf.data file detected\n", i);
/* True when @magic matches the legacy or either-endian v2 magic. */
bool is_perf_magic(u64 magic)
        if (!memcmp(&magic, __perf_magic1, sizeof(magic))
            || magic == __perf_magic2
            || magic == __perf_magic2_sw)
/*
 * Determine the file version and endianness from @magic, falling back
 * to header-size probing for the legacy "PERFFILE" format.  Sets
 * ph->version and ph->needs_swap.
 */
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
                              bool is_pipe, struct perf_header *ph)
        /* check for legacy format */
        ret = memcmp(&magic, __perf_magic1, sizeof(magic));
        ph->version = PERF_HEADER_VERSION_1;
        pr_debug("legacy perf.data format\n");
        /* Legacy format: endianness is probed from the header size. */
        return try_all_pipe_abis(hdr_sz, ph);
        return try_all_file_abis(hdr_sz, ph);

        /*
         * the new magic number serves two purposes:
         * - unique number to identify actual perf.data files
         * - encode endianness of file
         */
        ph->version = PERF_HEADER_VERSION_2;

        /* check magic number with one endianness */
        if (magic == __perf_magic2)

        /* check magic number with opposite endianness */
        if (magic != __perf_magic2_sw)

        ph->needs_swap = true;
/*
 * Read and validate the perf_file_header at offset 0: magic/endianness
 * check, optional byte-swap, legacy-format accommodation for the
 * feature bitmap, then copy the results into @ph.
 */
int perf_file_header__read(struct perf_file_header *header,
                           struct perf_header *ph, int fd)
        lseek(fd, 0, SEEK_SET);

        ret = readn(fd, header, sizeof(*header));

        if (check_magic_endian(header->magic,
                               header->attr_size, false, ph) < 0) {
                pr_debug("magic/endian check failed\n");

        /* Swap the fixed-size fields; the bitmap is handled below. */
        if (ph->needs_swap) {
                mem_bswap_64(header, offsetof(struct perf_file_header,

        if (header->size != sizeof(*header)) {
                /* Support the previous format */
                if (header->size == offsetof(typeof(*header), adds_features))
                        bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
        } else if (ph->needs_swap) {
                /*
                 * feature bitmap is declared as an array of unsigned longs --
                 * not good since its size can differ between the host that
                 * generated the data file and the host analyzing the file.
                 *
                 * We need to handle endianness, but we don't know the size of
                 * the unsigned long where the file was generated. Take a best
                 * guess at determining it: try 64-bit swap first (ie., file
                 * created on a 64-bit host), and check if the hostname feature
                 * bit is set (this feature bit is forced on as of fbe96f2).
                 * If the bit is not, undo the 64-bit swap and try a 32-bit
                 * swap. If the hostname bit is still not set (e.g., older data
                 * file), punt and fallback to the original behavior --
                 * clearing all feature bits and setting buildid.
                 */
                mem_bswap_64(&header->adds_features,
                             BITS_TO_U64(HEADER_FEAT_BITS));

                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
                        /* Undo the 64-bit swap, then try 32-bit. */
                        mem_bswap_64(&header->adds_features,
                                     BITS_TO_U64(HEADER_FEAT_BITS));

                        mem_bswap_32(&header->adds_features,
                                     BITS_TO_U32(HEADER_FEAT_BITS));

                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
                        bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
                        set_bit(HEADER_BUILD_ID, header->adds_features);

        memcpy(&ph->adds_features, &header->adds_features,
               sizeof(ph->adds_features));

        ph->data_offset  = header->data.offset;
        ph->data_size    = header->data.size;
        ph->feat_offset  = header->data.offset + header->data.size;
/*
 * Section callback for reading a perf.data file: seek to the feature
 * section and dispatch to its ->process handler.
 */
static int perf_file_section__process(struct perf_file_section *section,
                                      struct perf_header *ph,
                                      int feat, int fd, void *data)
        struct feat_fd fdd = {
                .size = section->size,
                .offset = section->offset,

        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
                         "%d, continuing...\n", section->offset, feat);

        /* Unknown features from a newer perf are skipped, not fatal. */
        if (feat >= HEADER_LAST_FEATURE) {
                pr_debug("unknown feature %d, continuing...\n", feat);

        if (!feat_ops[feat].process)

        return feat_ops[feat].process(&fdd, data);
/*
 * Read and validate the pipe-mode file header; when repiping, the
 * header is forwarded to stdout as-is.
 */
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
                                       struct perf_header *ph, int fd,
        struct feat_fd ff = {
                .fd = STDOUT_FILENO,

        ret = readn(fd, header, sizeof(*header));

        if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
                pr_debug("endian/magic failed\n");

        header->size = bswap_64(header->size);

        /* Forward the header downstream when acting as a repipe filter. */
        if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
/* Read the session header in pipe mode. */
static int perf_header__read_pipe(struct perf_session *session)
        struct perf_header *header = &session->header;
        struct perf_pipe_file_header f_header;

        if (perf_file_header__read_pipe(&f_header, header,
                                        perf_data__fd(session->data),
                                        session->repipe) < 0) {
                pr_debug("incompatible file format\n");
/*
 * Read one on-disk perf_file_attr.  Tolerates files whose attr is
 * smaller than ours (older ABI); rejects larger, unknown attr sizes.
 */
static int read_attr(int fd, struct perf_header *ph,
                     struct perf_file_attr *f_attr)
        struct perf_event_attr *attr = &f_attr->attr;
        size_t our_sz = sizeof(f_attr->attr);

        memset(f_attr, 0, sizeof(*f_attr));

        /* read minimal guaranteed structure */
        ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
        pr_debug("cannot read %d bytes of header attr\n",
                 PERF_ATTR_SIZE_VER0);

        /* on file perf_event_attr size */

        sz = PERF_ATTR_SIZE_VER0;
        } else if (sz > our_sz) {
                pr_debug("file uses a more recent and unsupported ABI"
                         " (%zu bytes extra)\n", sz - our_sz);
        /* what we have not yet read and that we know about */
        left = sz - PERF_ATTR_SIZE_VER0;

        ptr += PERF_ATTR_SIZE_VER0;

        ret = readn(fd, ptr, left);

        /* read perf_file_section, ids are read in caller */
        ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

        return ret <= 0 ? -1 : 0;
/*
 * Bind a tracepoint evsel to its tep event format and give it a
 * "system:name" display name.
 */
static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
                                                struct tep_handle *pevent)
        struct tep_event *event;

        /* already prepared */
        if (evsel->tp_format)

        if (pevent == NULL) {
                pr_debug("broken or missing trace data\n");

        /* attr.config carries the tracepoint id. */
        event = tep_find_event(pevent, evsel->attr.config);
        if (event == NULL) {
                pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);

        snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
        evsel->name = strdup(bf);
        if (evsel->name == NULL)

        evsel->tp_format = event;
/* Prepare every tracepoint evsel in @evlist; stop at the first failure. */
static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
                                                  struct tep_handle *pevent)
        struct perf_evsel *pos;

        evlist__for_each_entry(evlist, pos) {
                if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
                    perf_evsel__prepare_tracepoint_event(pos, pevent))
/*
 * Read a perf.data header: parse the file header, build an evsel per
 * on-disk attr (with its sample ids), then process all feature
 * sections and prepare tracepoint events.
 */
int perf_session__read_header(struct perf_session *session)
        struct perf_data *data = session->data;
        struct perf_header *header = &session->header;
        struct perf_file_header f_header;
        struct perf_file_attr f_attr;
        int nr_attrs, nr_ids, i, j;
        int fd = perf_data__fd(data);

        session->evlist = perf_evlist__new();
        if (session->evlist == NULL)

        session->evlist->env = &header->env;
        session->machines.host.env = &header->env;
        if (perf_data__is_pipe(data))
                return perf_header__read_pipe(session);

        if (perf_file_header__read(&f_header, header, fd) < 0)

        /*
         * Sanity check that perf.data was written cleanly; data size is
         * initialized to 0 and updated only if the on_exit function is run.
         * If data size is still 0 then the file contains only partial
         * information. Just warn user and process it as much as it can.
         */
        if (f_header.data.size == 0) {
                pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
                           "Was the 'perf record' command properly terminated?\n",

        nr_attrs = f_header.attrs.size / f_header.attr_size;
        lseek(fd, f_header.attrs.offset, SEEK_SET);

        for (i = 0; i < nr_attrs; i++) {
                struct perf_evsel *evsel;

                if (read_attr(fd, header, &f_attr) < 0)

                if (header->needs_swap) {
                        f_attr.ids.size = bswap_64(f_attr.ids.size);
                        f_attr.ids.offset = bswap_64(f_attr.ids.offset);
                        perf_event__attr_swap(&f_attr.attr);

                /* Remember the position so we can return after the ids. */
                tmp = lseek(fd, 0, SEEK_CUR);
                evsel = perf_evsel__new(&f_attr.attr);
                        goto out_delete_evlist;

                evsel->needs_swap = header->needs_swap;
                /*
                 * Do it before so that if perf_evsel__alloc_id fails, this
                 * entry gets purged too at perf_evlist__delete().
                 */
                perf_evlist__add(session->evlist, evsel);

                nr_ids = f_attr.ids.size / sizeof(u64);
                /*
                 * We don't have the cpu and thread maps on the header, so
                 * for allocating the perf_sample_id table we fake 1 cpu and
                 * hattr->ids threads.
                 */
                if (perf_evsel__alloc_id(evsel, 1, nr_ids))
                        goto out_delete_evlist;

                lseek(fd, f_attr.ids.offset, SEEK_SET);

                for (j = 0; j < nr_ids; j++) {
                        if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))

                        perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);

                lseek(fd, tmp, SEEK_SET);

        perf_header__process_sections(header, fd, &session->tevent,
                                      perf_file_section__process);

        if (perf_evlist__prepare_tracepoint_events(session->evlist,
                                                   session->tevent.pevent))
                goto out_delete_evlist;

        perf_evlist__delete(session->evlist);
        session->evlist = NULL;
/*
 * Synthesize one PERF_RECORD_HEADER_ATTR event carrying @attr plus its
 * @ids sample ids, and feed it to @process.
 */
int perf_event__synthesize_attr(struct perf_tool *tool,
                                struct perf_event_attr *attr, u32 ids, u64 *id,
                                perf_event__handler_t process)
        union perf_event *ev;

        size = sizeof(struct perf_event_attr);
        size = PERF_ALIGN(size, sizeof(u64));
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);

        ev->attr.attr = *attr;
        memcpy(ev->attr.id, id, ids * sizeof(u64));

        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
        ev->attr.header.size = (u16)size;

        /* Guard against u16 truncation of the record size. */
        if (ev->attr.header.size == size)
                err = process(tool, ev, NULL, NULL);
/*
 * Pipe mode: emit one PERF_RECORD_HEADER_FEATURE event per
 * synthesizable feature, terminated by a HEADER_LAST_FEATURE marker.
 */
int perf_event__synthesize_features(struct perf_tool *tool,
                                    struct perf_session *session,
                                    struct perf_evlist *evlist,
                                    perf_event__handler_t process)
        struct perf_header *header = &session->header;
        struct feature_event *fe;

        sz_hdr = sizeof(fe->header);
        sz = sizeof(union perf_event);
        /* get a nice alignment */
        sz = PERF_ALIGN(sz, page_size);

        memset(&ff, 0, sizeof(ff));

        ff.buf = malloc(sz);

        ff.size = sz - sz_hdr;

        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
                /* Only FEAT_OPR entries set .synthesize. */
                if (!feat_ops[feat].synthesize) {
                        pr_debug("No record header feature for header :%d\n", feat);

                /* Write the feature payload after the event header. */
                ff.offset = sizeof(*fe);

                ret = feat_ops[feat].write(&ff, evlist);
                if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
                        pr_debug("Error writing feature\n");
                /* ff.buf may have changed due to realloc in do_write() */
                memset(fe, 0, sizeof(*fe));

                fe->header.type = PERF_RECORD_HEADER_FEATURE;
                fe->header.size = ff.offset;

                ret = process(tool, ff.buf, NULL, NULL);

        /* Send HEADER_LAST_FEATURE mark. */
        fe->feat_id = HEADER_LAST_FEATURE;
        fe->header.type = PERF_RECORD_HEADER_FEATURE;
        fe->header.size = sizeof(*fe);

        ret = process(tool, ff.buf, NULL, NULL);
/*
 * Pipe mode: handle one PERF_RECORD_HEADER_FEATURE event by running the
 * feature's ->process handler over the in-event payload, optionally
 * printing it when the tool asked for header display.
 */
int perf_event__process_feature(struct perf_session *session,
                                union perf_event *event)
        struct perf_tool *tool = session->tool;
        struct feat_fd ff = { .fd = 0 };
        struct feature_event *fe = (struct feature_event *)event;
        int type = fe->header.type;
        u64 feat = fe->feat_id;

        if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
        if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
                pr_warning("invalid record type %d in pipe-mode\n", type);

        if (!feat_ops[feat].process)

        /* The payload lives in the event itself, not behind an fd. */
        ff.buf  = (void *)fe->data;
        ff.size = event->header.size - sizeof(event->header);
        ff.ph = &session->header;

        if (feat_ops[feat].process(&ff, NULL))

        if (!feat_ops[feat].print || !tool->show_feat_hdr)

        if (!feat_ops[feat].full_only ||
            tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
                feat_ops[feat].print(&ff, stdout);
        fprintf(stdout, "# %s info available, use -I to display\n",
                feat_ops[feat].name);
/*
 * Allocate a PERF_RECORD_EVENT_UPDATE event with @size bytes of payload
 * (u64-aligned), pre-filled with the record header.
 */
static struct event_update_event *
event_update_event__new(size_t size, u64 type, u64 id)
        struct event_update_event *ev;

        size += sizeof(*ev);
        size = PERF_ALIGN(size, sizeof(u64));

        ev->header.type = PERF_RECORD_EVENT_UPDATE;
        ev->header.size = (u16)size;
/* Synthesize an EVENT_UPDATE record carrying the evsel's unit string. */
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
                                         struct perf_evsel *evsel,
                                         perf_event__handler_t process)
        struct event_update_event *ev;
        size_t size = strlen(evsel->unit);

        ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);

        strlcpy(ev->data, evsel->unit, size + 1);
        err = process(tool, (union perf_event *)ev, NULL, NULL);
/* Synthesize an EVENT_UPDATE record carrying the evsel's scale factor. */
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
                                          struct perf_evsel *evsel,
                                          perf_event__handler_t process)
        struct event_update_event *ev;
        struct event_update_event_scale *ev_data;

        ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);

        ev_data = (struct event_update_event_scale *) ev->data;
        ev_data->scale = evsel->scale;
        err = process(tool, (union perf_event*) ev, NULL, NULL);
/* Synthesize an EVENT_UPDATE record carrying the evsel's name. */
perf_event__synthesize_event_update_name(struct perf_tool *tool,
                                         struct perf_evsel *evsel,
                                         perf_event__handler_t process)
        struct event_update_event *ev;
        size_t len = strlen(evsel->name);

        ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);

        strlcpy(ev->data, evsel->name, len + 1);
        err = process(tool, (union perf_event*) ev, NULL, NULL);
/*
 * Synthesize an EVENT_UPDATE record carrying the evsel's own CPU map;
 * a no-op for evsels without one.
 */
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
                                         struct perf_evsel *evsel,
                                         perf_event__handler_t process)
        size_t size = sizeof(struct event_update_event);
        struct event_update_event *ev;

        if (!evsel->own_cpus)

        /* Allocation is sized by the helper for the serialized map. */
        ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);

        ev->header.type = PERF_RECORD_EVENT_UPDATE;
        ev->header.size = (u16)size;
        ev->type = PERF_EVENT_UPDATE__CPUS;
        ev->id = evsel->id[0];

        cpu_map_data__synthesize((struct cpu_map_data *) ev->data,

        err = process(tool, (union perf_event*) ev, NULL, NULL);
/*
 * Dump one EVENT_UPDATE record (scale, unit, name or cpu map) to @fp;
 * returns the number of characters printed.
 */
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
        struct event_update_event *ev = &event->event_update;
        struct event_update_event_scale *ev_scale;
        struct event_update_event_cpus *ev_cpus;
        struct cpu_map *map;

        ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);

        case PERF_EVENT_UPDATE__SCALE:
                ev_scale = (struct event_update_event_scale *) ev->data;
                ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
        case PERF_EVENT_UPDATE__UNIT:
                ret += fprintf(fp, "... unit:  %s\n", ev->data);
        case PERF_EVENT_UPDATE__NAME:
                ret += fprintf(fp, "... name:  %s\n", ev->data);
        case PERF_EVENT_UPDATE__CPUS:
                ev_cpus = (struct event_update_event_cpus *) ev->data;
                ret += fprintf(fp, "... ");

                /* Decode the serialized map before printing it. */
                map = cpu_map__new_data(&ev_cpus->cpus);
                ret += cpu_map__fprintf(map, fp);
                ret += fprintf(fp, "failed to get cpus\n");
                ret += fprintf(fp, "... unknown type\n");
/*
 * Synthesize one PERF_RECORD_HEADER_ATTR per evsel in @evlist and feed
 * each to @process.  Used when piping, where there is no on-disk header
 * to read the attributes from.
 *
 * NOTE(review): the err < 0 check, loop break and final return are
 * elided in this excerpt.
 */
3557 int perf_event__synthesize_attrs(struct perf_tool *tool,
3558 struct perf_evlist *evlist,
3559 perf_event__handler_t process)
3561 struct perf_evsel *evsel;
3564 evlist__for_each_entry(evlist, evsel) {
/* Each attr event carries the evsel's attr plus its sample ids. */
3565 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3566 evsel->id, process);
3568 pr_debug("failed to create perf header attribute\n");
3576 static bool has_unit(struct perf_evsel *counter)
3578 return counter->unit && *counter->unit;
3581 static bool has_scale(struct perf_evsel *counter)
3583 return counter->scale != 1;
/*
 * Synthesize the per-evsel metadata that PERF_RECORD_HEADER_ATTR does
 * not carry: unit, scale, own cpu map and (for pipe output) the event
 * name.  One update record is emitted per property that is actually
 * set on the counter.
 *
 * NOTE(review): the err < 0 checks, early returns, the pipe-output
 * condition guarding the name synthesis and the final return are elided
 * in this excerpt.
 */
3586 int perf_event__synthesize_extra_attr(struct perf_tool *tool,
3587 struct perf_evlist *evsel_list,
3588 perf_event__handler_t process,
3591 struct perf_evsel *counter;
3595 * Synthesize other events stuff not carried within
3596 * attr event - unit, scale, name
3598 evlist__for_each_entry(evsel_list, counter) {
/* Skip counters the kernel refused to open. */
3599 if (!counter->supported)
3603 * Synthesize unit and scale only if it's defined.
3605 if (has_unit(counter)) {
3606 err = perf_event__synthesize_event_update_unit(tool, counter, process);
3608 pr_err("Couldn't synthesize evsel unit.\n");
3613 if (has_scale(counter)) {
3614 err = perf_event__synthesize_event_update_scale(tool, counter, process);
/* NOTE(review): message says "counter" but this is the scale path. */
3616 pr_err("Couldn't synthesize evsel counter.\n");
3621 if (counter->own_cpus) {
3622 err = perf_event__synthesize_event_update_cpus(tool, counter, process);
3624 pr_err("Couldn't synthesize evsel cpus.\n");
3630 * Name is needed only for pipe output,
3631 * perf.data carries event names.
3634 err = perf_event__synthesize_event_update_name(tool, counter, process);
3636 pr_err("Couldn't synthesize evsel name.\n");
/*
 * Handle an incoming PERF_RECORD_HEADER_ATTR: create the evlist on
 * first use, add a new evsel built from the event's attr, and register
 * every sample id carried in the record's trailing id array.
 *
 * NOTE(review): declarations of ids/n_ids/i, NULL checks on
 * perf_evlist__new()/perf_evsel__new(), error returns and the final
 * return 0 are elided in this excerpt.
 */
3644 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3645 union perf_event *event,
3646 struct perf_evlist **pevlist)
3649 struct perf_evsel *evsel;
3650 struct perf_evlist *evlist = *pevlist;
/* First attr seen: lazily create the session evlist. */
3652 if (evlist == NULL) {
3653 *pevlist = evlist = perf_evlist__new();
3658 evsel = perf_evsel__new(&event->attr.attr);
3662 perf_evlist__add(evlist, evsel);
/* Bytes remaining after the fixed part = the u64 id array. */
3664 ids = event->header.size;
3665 ids -= (void *)&event->attr.id - (void *)event;
3666 n_ids = ids / sizeof(u64);
3668 * We don't have the cpu and thread maps on the header, so
3669 * for allocating the perf_sample_id table we fake 1 cpu and
3670 * hattr->ids threads.
3672 if (perf_evsel__alloc_id(evsel, 1, n_ids))
3675 for (i = 0; i < n_ids; i++) {
3676 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
/*
 * Handle an incoming PERF_RECORD_EVENT_UPDATE: look up the evsel by the
 * record's sample id and apply the update (unit, name, scale or cpu
 * map) to it.
 *
 * NOTE(review): the error returns for missing evlist/evsel, the switch
 * statement and breaks, and the final return are elided in this
 * excerpt.  Also note the strdup() assignments overwrite any previous
 * unit/name pointer — presumably freed elsewhere or never set; confirm
 * against the full file.
 */
3682 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3683 union perf_event *event,
3684 struct perf_evlist **pevlist)
3686 struct event_update_event *ev = &event->event_update;
3687 struct event_update_event_scale *ev_scale;
3688 struct event_update_event_cpus *ev_cpus;
3689 struct perf_evlist *evlist;
3690 struct perf_evsel *evsel;
3691 struct cpu_map *map;
/* Updates are meaningless without an evlist to apply them to. */
3693 if (!pevlist || *pevlist == NULL)
/* Find the evsel this update targets via its sample id. */
3698 evsel = perf_evlist__id2evsel(evlist, ev->id);
3703 case PERF_EVENT_UPDATE__UNIT:
3704 evsel->unit = strdup(ev->data);
3706 case PERF_EVENT_UPDATE__NAME:
3707 evsel->name = strdup(ev->data);
3709 case PERF_EVENT_UPDATE__SCALE:
3710 ev_scale = (struct event_update_event_scale *) ev->data;
3711 evsel->scale = ev_scale->scale;
3713 case PERF_EVENT_UPDATE__CPUS:
/* Deserialize the embedded cpu map into a live cpu_map. */
3714 ev_cpus = (struct event_update_event_cpus *) ev->data;
3716 map = cpu_map__new_data(&ev_cpus->cpus);
3718 evsel->own_cpus = map;
3720 pr_err("failed to get event_update cpus\n");
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event for pipe mode:
 * emit a header announcing the (u64-aligned) size, then stream the
 * tracing data itself into @fd via tracing_data_put(), followed by the
 * alignment padding.  Returns the aligned size on success.
 *
 * NOTE(review): the NULL check on tracing_data_get(), the assignment
 * of size from tdata, error handling for write_padded() and the exact
 * temp-file plumbing are elided in this excerpt.
 */
3728 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3729 struct perf_evlist *evlist,
3730 perf_event__handler_t process)
3732 union perf_event ev;
3733 struct tracing_data *tdata;
3734 ssize_t size = 0, aligned_size = 0, padding;
3736 int err __maybe_unused = 0;
3739 * We are going to store the size of the data followed
3740 * by the data contents. Since the fd descriptor is a pipe,
3741 * we cannot seek back to store the size of the data once
3742 * we know it. Instead we:
3744 * - write the tracing data to the temp file
3745 * - get/write the data size to pipe
3746 * - write the tracing data from the temp file
/* temp=true: stage the data in a temp file so its size is known first. */
3749 tdata = tracing_data_get(&evlist->entries, fd, true);
3753 memset(&ev, 0, sizeof(ev));
3755 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
/* Readers expect the payload rounded up to a u64 boundary. */
3757 aligned_size = PERF_ALIGN(size, sizeof(u64));
3758 padding = aligned_size - size;
3759 ev.tracing_data.header.size = sizeof(ev.tracing_data);
3760 ev.tracing_data.size = aligned_size;
3762 process(tool, &ev, NULL, NULL);
3765 * The put function will copy all the tracing data
3766 * stored in temp file to the pipe.
3768 tracing_data_put(tdata);
/* Pad the stream out to the announced aligned size. */
3770 ff = (struct feat_fd){ .fd = fd };
3771 if (write_padded(&ff, NULL, 0, padding))
3774 return aligned_size;
/*
 * Handle an incoming PERF_RECORD_HEADER_TRACING_DATA: parse the tracing
 * data that follows the event in the stream via trace_report(), consume
 * (and, when repiping, forward) the alignment padding, and verify the
 * total consumed matches the size announced in the event.  Returns the
 * number of bytes consumed after the event header.
 *
 * NOTE(review): the declaration of buf, the repipe argument to
 * trace_report(), SEEK_SET in the lseek call, error-return statements
 * and intermediate braces are elided in this excerpt.
 */
3777 int perf_event__process_tracing_data(struct perf_session *session,
3778 union perf_event *event)
3780 ssize_t size_read, padding, size = event->tracing_data.size;
3781 int fd = perf_data__fd(session->data);
3782 off_t offset = lseek(fd, 0, SEEK_CUR);
3785 /* setup for reading amidst mmap */
/* Skip the event header itself; the payload follows it in the file. */
3786 lseek(fd, offset + sizeof(struct tracing_data_event),
3789 size_read = trace_report(fd, &session->tevent,
/* The writer padded the payload to a u64 boundary; consume the rest. */
3791 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3793 if (readn(fd, buf, padding) < 0) {
3794 pr_err("%s: reading input file", __func__);
/* In repipe mode the padding must be forwarded byte-for-byte. */
3797 if (session->repipe) {
3798 int retw = write(STDOUT_FILENO, buf, padding);
3799 if (retw <= 0 || retw != padding) {
3800 pr_err("%s: repiping tracing data padding", __func__);
/* Sanity check: consumed bytes must match the announced size. */
3805 if (size_read + padding != size) {
3806 pr_err("%s: tracing data size mismatch", __func__);
/* Wire the freshly parsed tracepoint formats to the session's evsels. */
3810 perf_evlist__prepare_tracepoint_events(session->evlist,
3811 session->tevent.pevent);
3813 return size_read + padding;
/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for one dso: the
 * build-id bytes plus the dso's long name (NUL-padded up to NAME_ALIGN)
 * are packed into the event and handed to @process.
 *
 * NOTE(review): the declarations of len/err, the !pos->hit early-out
 * typically guarding this path, and the return of err are elided in
 * this excerpt.
 */
3816 int perf_event__synthesize_build_id(struct perf_tool *tool,
3817 struct dso *pos, u16 misc,
3818 perf_event__handler_t process,
3819 struct machine *machine)
3821 union perf_event ev;
/* Zeroed first so the name's alignment padding is NUL bytes. */
3828 memset(&ev, 0, sizeof(ev));
/* +1 for the NUL, then round up to the record's name alignment. */
3830 len = pos->long_name_len + 1;
3831 len = PERF_ALIGN(len, NAME_ALIGN);
3832 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3833 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3834 ev.build_id.header.misc = misc;
3835 ev.build_id.pid = machine->pid;
/* Total record size = fixed part + aligned filename length. */
3836 ev.build_id.header.size = sizeof(ev.build_id) + len;
3837 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3839 err = process(tool, &ev, NULL, machine);
3844 int perf_event__process_build_id(struct perf_session *session,
3845 union perf_event *event)
3847 __event_process_build_id(&event->build_id,
3848 event->build_id.filename,