1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/kernel.h>
8 #include <linux/mman.h>
9 #include <linux/time64.h>
10 #include <sys/types.h>
12 #include <sys/param.h>
25 #include "namespaces.h"
28 #include <linux/ctype.h>
29 #include <linux/zalloc.h>
33 #include <symbol/kallsyms.h>
34 #include <sys/utsname.h>
36 static int dso__load_kernel_sym(struct dso *dso, struct map *map);
37 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
38 static bool symbol__is_idle(const char *name);
40 int vmlinux_path__nr_entries;
43 struct symbol_conf symbol_conf = {
46 .try_vmlinux_path = true,
48 .demangle_kernel = false,
49 .cumulate_callchain = true,
50 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
51 .show_hist_headers = true,
58 static enum dso_binary_type binary_type_symtab[] = {
59 DSO_BINARY_TYPE__KALLSYMS,
60 DSO_BINARY_TYPE__GUEST_KALLSYMS,
61 DSO_BINARY_TYPE__JAVA_JIT,
62 DSO_BINARY_TYPE__DEBUGLINK,
63 DSO_BINARY_TYPE__BUILD_ID_CACHE,
64 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
65 DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
66 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
67 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
68 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
69 DSO_BINARY_TYPE__GUEST_KMODULE,
70 DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
71 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
72 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
73 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
74 DSO_BINARY_TYPE__NOT_FOUND,
77 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
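/*
 * Note: the table above is effectively a priority list. dso__load() below
 * walks it in order and keeps the first candidates that provide a symbol
 * table (syms_ss) and a runtime image (runtime_ss), stopping once both
 * have been found.
 */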
79 static bool symbol_type__filter(char symbol_type)
81 symbol_type = toupper(symbol_type);
82 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
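/*
 * The single-letter symbol types above follow the nm(1)/kallsyms
 * convention: 'T'/'t' text, 'W'/'w' weak, 'D'/'d' initialized data,
 * 'B'/'b' BSS. Everything else (absolute symbols, undefined references,
 * etc.) is filtered out.
 */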
85 static int prefix_underscores_count(const char *str)
87 const char *tail = str;
95 const char * __weak arch__normalize_symbol_name(const char *name)
100 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
102 return strcmp(namea, nameb);
105 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
108 return strncmp(namea, nameb, n);
111 int __weak arch__choose_best_symbol(struct symbol *syma,
112 struct symbol *symb __maybe_unused)
114 /* Avoid "SyS" kernel syscall aliases */
115 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
117 if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
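/*
 * choose_best_symbol() picks which of two symbols at the same address to
 * keep when fixing up duplicates: prefer non-zero length, then strong over
 * weak binding, then global binding, then fewer leading underscores, then
 * the longer name, and finally defer to the arch-specific tie-breaker.
 */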
123 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
129 /* Prefer a symbol with non-zero length */
130 a = syma->end - syma->start;
131 b = symb->end - symb->start;
132 if ((b == 0) && (a > 0))
134 else if ((a == 0) && (b > 0))
137 /* Prefer a non-weak symbol over a weak one */
138 a = syma->binding == STB_WEAK;
139 b = symb->binding == STB_WEAK;
145 /* Prefer a global symbol over a non-global one */
146 a = syma->binding == STB_GLOBAL;
147 b = symb->binding == STB_GLOBAL;
153 /* Prefer a symbol with less underscores */
154 a = prefix_underscores_count(syma->name);
155 b = prefix_underscores_count(symb->name);
161 /* Choose the symbol with the longest name */
162 na = strlen(syma->name);
163 nb = strlen(symb->name);
169 return arch__choose_best_symbol(syma, symb);
172 void symbols__fixup_duplicate(struct rb_root_cached *symbols)
175 struct symbol *curr, *next;
177 if (symbol_conf.allow_aliases)
180 nd = rb_first_cached(symbols);
183 curr = rb_entry(nd, struct symbol, rb_node);
185 nd = rb_next(&curr->rb_node);
186 next = rb_entry(nd, struct symbol, rb_node);
191 if (curr->start != next->start)
194 if (choose_best_symbol(curr, next) == SYMBOL_A) {
195 rb_erase_cached(&next->rb_node, symbols);
196 symbol__delete(next);
199 nd = rb_next(&curr->rb_node);
200 rb_erase_cached(&curr->rb_node, symbols);
201 symbol__delete(curr);
206 void symbols__fixup_end(struct rb_root_cached *symbols)
208 struct rb_node *nd, *prevnd = rb_first_cached(symbols);
209 struct symbol *curr, *prev;
214 curr = rb_entry(prevnd, struct symbol, rb_node);
216 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
218 curr = rb_entry(nd, struct symbol, rb_node);
220 if (prev->end == prev->start && prev->end != curr->start)
221 prev->end = curr->start;
225 if (curr->end == curr->start)
226 curr->end = roundup(curr->start, 4096) + 4096;
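/*
 * The last symbol has no successor to borrow an end address from, so the
 * code above falls back to a heuristic: round its start up to a 4 KiB
 * boundary and add one more page (an assumption, not an exact size).
 */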
229 void map_groups__fixup_end(struct map_groups *mg)
231 struct maps *maps = &mg->maps;
232 struct map *next, *curr;
234 down_write(&maps->lock);
236 curr = maps__first(maps);
240 for (next = map__next(curr); next; next = map__next(curr)) {
242 curr->end = next->start;
247 * We don't have the actual symbols yet, so guess the
248 * last map's final address.
254 up_write(&maps->lock);
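/*
 * symbol__new() allocates the per-symbol private area, the struct symbol
 * and the name string as one block:
 *
 *   [ symbol_conf.priv_size bytes | struct symbol | name\0 ]
 *
 * The returned pointer is advanced past the private area, which is why
 * symbol__delete() walks back by priv_size before freeing.
 */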
257 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
259 size_t namelen = strlen(name) + 1;
260 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
261 sizeof(*sym) + namelen));
265 if (symbol_conf.priv_size) {
266 if (symbol_conf.init_annotation) {
267 struct annotation *notes = (void *)sym;
268 pthread_mutex_init(&notes->lock, NULL);
270 sym = ((void *)sym) + symbol_conf.priv_size;
274 sym->end = len ? start + len : start;
276 sym->binding = binding;
277 sym->namelen = namelen - 1;
279 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
280 __func__, name, start, sym->end);
281 memcpy(sym->name, name, namelen);
286 void symbol__delete(struct symbol *sym)
288 free(((void *)sym) - symbol_conf.priv_size);
291 void symbols__delete(struct rb_root_cached *symbols)
294 struct rb_node *next = rb_first_cached(symbols);
297 pos = rb_entry(next, struct symbol, rb_node);
298 next = rb_next(&pos->rb_node);
299 rb_erase_cached(&pos->rb_node, symbols);
304 void __symbols__insert(struct rb_root_cached *symbols,
305 struct symbol *sym, bool kernel)
307 struct rb_node **p = &symbols->rb_root.rb_node;
308 struct rb_node *parent = NULL;
309 const u64 ip = sym->start;
311 bool leftmost = true;
314 const char *name = sym->name;
316 * ppc64 uses function descriptors and appends a '.' to the
317 * start of every instruction address. Remove it.
321 sym->idle = symbol__is_idle(name);
326 s = rb_entry(parent, struct symbol, rb_node);
334 rb_link_node(&sym->rb_node, parent, p);
335 rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
338 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
340 __symbols__insert(symbols, sym, false);
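/*
 * symbols__find() does an address lookup in the rb-tree: an ip matches a
 * symbol if it falls in [start, end), with the special case that a
 * zero-length symbol (start == end) still matches its own address.
 */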
343 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
350 n = symbols->rb_root.rb_node;
353 struct symbol *s = rb_entry(n, struct symbol, rb_node);
357 else if (ip > s->end || (ip == s->end && ip != s->start))
366 static struct symbol *symbols__first(struct rb_root_cached *symbols)
368 struct rb_node *n = rb_first_cached(symbols);
371 return rb_entry(n, struct symbol, rb_node);
376 static struct symbol *symbols__last(struct rb_root_cached *symbols)
378 struct rb_node *n = rb_last(&symbols->rb_root);
381 return rb_entry(n, struct symbol, rb_node);
386 static struct symbol *symbols__next(struct symbol *sym)
388 struct rb_node *n = rb_next(&sym->rb_node);
391 return rb_entry(n, struct symbol, rb_node);
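/*
 * The functions below maintain a second rb-tree sorted by symbol name.
 * Each node is a struct symbol_name_rb_node that embeds the symbol, which
 * is why container_of() is enough to go from a symbol back to its
 * name-tree node (the extra space is reserved in symbol__init() when
 * sort_by_name is enabled).
 */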
396 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
398 struct rb_node **p = &symbols->rb_root.rb_node;
399 struct rb_node *parent = NULL;
400 struct symbol_name_rb_node *symn, *s;
401 bool leftmost = true;
403 symn = container_of(sym, struct symbol_name_rb_node, sym);
407 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
408 if (strcmp(sym->name, s->sym.name) < 0)
415 rb_link_node(&symn->rb_node, parent, p);
416 rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
419 static void symbols__sort_by_name(struct rb_root_cached *symbols,
420 struct rb_root_cached *source)
424 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
425 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
426 symbols__insert_by_name(symbols, pos);
430 int symbol__match_symbol_name(const char *name, const char *str,
431 enum symbol_tag_include includes)
433 const char *versioning;
435 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
436 (versioning = strstr(name, "@@"))) {
437 int len = strlen(str);
439 if (len < versioning - name)
440 len = versioning - name;
442 return arch__compare_symbol_names_n(name, str, len);
444 return arch__compare_symbol_names(name, str);
447 static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
449 enum symbol_tag_include includes)
452 struct symbol_name_rb_node *s = NULL;
457 n = symbols->rb_root.rb_node;
462 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
463 cmp = symbol__match_symbol_name(s->sym.name, name, includes);
476 if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
477 /* return first symbol that has same name (if any) */
478 for (n = rb_prev(n); n; n = rb_prev(n)) {
479 struct symbol_name_rb_node *tmp;
481 tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
482 if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
491 void dso__reset_find_symbol_cache(struct dso *dso)
493 dso->last_find_result.addr = 0;
494 dso->last_find_result.symbol = NULL;
497 void dso__insert_symbol(struct dso *dso, struct symbol *sym)
499 __symbols__insert(&dso->symbols, sym, dso->kernel);
501 /* update the symbol cache if necessary */
502 if (dso->last_find_result.addr >= sym->start &&
503 (dso->last_find_result.addr < sym->end ||
504 sym->start == sym->end)) {
505 dso->last_find_result.symbol = sym;
509 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
511 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
512 dso->last_find_result.addr = addr;
513 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
516 return dso->last_find_result.symbol;
519 struct symbol *dso__first_symbol(struct dso *dso)
521 return symbols__first(&dso->symbols);
524 struct symbol *dso__last_symbol(struct dso *dso)
526 return symbols__last(&dso->symbols);
529 struct symbol *dso__next_symbol(struct symbol *sym)
531 return symbols__next(sym);
534 struct symbol *symbol__next_by_name(struct symbol *sym)
536 struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
537 struct rb_node *n = rb_next(&s->rb_node);
539 return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
543 * Returns the first symbol that matches @name.
545 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
547 struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
548 SYMBOL_TAG_INCLUDE__NONE);
550 s = symbols__find_by_name(&dso->symbol_names, name,
551 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
555 void dso__sort_by_name(struct dso *dso)
557 dso__set_sorted_by_name(dso);
558 return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
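/*
 * modules__parse() reads a /proc/modules-style file. Each line looks
 * roughly like (illustrative example, not taken from a real system):
 *
 *   ext4 778240 2 - Live 0xffffffffc0a30000
 *
 * The code below grabs the load address from the last "0x..." field, the
 * module name from the text before the first space, and the size from the
 * second field.
 */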
561 int modules__parse(const char *filename, void *arg,
562 int (*process_module)(void *arg, const char *name,
563 u64 start, u64 size))
570 file = fopen(filename, "r");
580 line_len = getline(&line, &n, file);
593 line[--line_len] = '\0'; /* \n */
595 sep = strrchr(line, 'x');
599 hex2u64(sep + 1, &start);
601 sep = strchr(line, ' ');
607 scnprintf(name, sizeof(name), "[%s]", line);
609 size = strtoul(sep + 1, &endptr, 0);
610 if (*endptr != ' ' && *endptr != '\t')
613 err = process_module(arg, name, start, size);
624 * These are symbols in the kernel image, so make sure that
625 * sym is from a kernel DSO.
627 static bool symbol__is_idle(const char *name)
629 const char * const idle_symbols[] = {
639 "mwait_idle_with_hints",
641 "ppc64_runlatch_off",
642 "pseries_dedicated_idle_sleep",
647 for (i = 0; idle_symbols[i]; i++) {
648 if (!strcmp(idle_symbols[i], name))
655 static int map__process_kallsym_symbol(void *arg, const char *name,
656 char type, u64 start)
659 struct dso *dso = arg;
660 struct rb_root_cached *root = &dso->symbols;
662 if (!symbol_type__filter(type))
666 * module symbols are not sorted so we add all
667 * symbols, setting length to 0, and rely on
668 * symbols__fixup_end() to fix it up.
670 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
674 * We will pass the symbols to the filter later, in
675 * map__split_kallsyms, when we have split the maps per module
677 __symbols__insert(root, sym, !strchr(name, '['));
683 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
684 * so that in the next step we can set the symbols' ->end addresses and then
685 * call kernel_maps__split_kallsyms.
687 static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
689 return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
692 static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
694 struct map *curr_map;
697 struct rb_root_cached old_root = dso->symbols;
698 struct rb_root_cached *root = &dso->symbols;
699 struct rb_node *next = rb_first_cached(root);
704 *root = RB_ROOT_CACHED;
709 pos = rb_entry(next, struct symbol, rb_node);
710 next = rb_next(&pos->rb_node);
712 rb_erase_cached(&pos->rb_node, &old_root);
713 RB_CLEAR_NODE(&pos->rb_node);
714 module = strchr(pos->name, '\t');
718 curr_map = map_groups__find(kmaps, pos->start);
725 pos->start -= curr_map->start - curr_map->pgoff;
726 if (pos->end > curr_map->end)
727 pos->end = curr_map->end;
729 pos->end -= curr_map->start - curr_map->pgoff;
730 symbols__insert(&curr_map->dso->symbols, pos);
734 /* Symbols have been adjusted */
735 dso->adjust_symbols = 1;
741 * Split the symbols into maps, making sure there are no overlaps, i.e. the
742 * kernel range is broken into several maps, named [kernel].N, as we don't have
743 * the original ELF section names that vmlinux has.
745 static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
746 struct map *initial_map)
748 struct machine *machine;
749 struct map *curr_map = initial_map;
751 int count = 0, moved = 0;
752 struct rb_root_cached *root = &dso->symbols;
753 struct rb_node *next = rb_first_cached(root);
754 int kernel_range = 0;
760 machine = kmaps->machine;
762 x86_64 = machine__is(machine, "x86_64");
767 pos = rb_entry(next, struct symbol, rb_node);
768 next = rb_next(&pos->rb_node);
770 module = strchr(pos->name, '\t');
772 if (!symbol_conf.use_modules)
777 if (strcmp(curr_map->dso->short_name, module)) {
778 if (curr_map != initial_map &&
779 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
780 machine__is_default_guest(machine)) {
782 * We assume all symbols of a module are
783 * contiguous in kallsyms, so curr_map
784 * points to a module and all its
785 * symbols are in its kmap. Mark it as
788 dso__set_loaded(curr_map->dso);
791 curr_map = map_groups__find_by_name(kmaps, module);
792 if (curr_map == NULL) {
793 pr_debug("%s/proc/{kallsyms,modules} "
794 "inconsistency while looking "
795 "for \"%s\" module!\n",
796 machine->root_dir, module);
797 curr_map = initial_map;
801 if (curr_map->dso->loaded &&
802 !machine__is_default_guest(machine))
806 * So that we look just like we get from .ko files,
807 * i.e. not prelinked, relative to initial_map->start.
809 pos->start = curr_map->map_ip(curr_map, pos->start);
810 pos->end = curr_map->map_ip(curr_map, pos->end);
811 } else if (x86_64 && is_entry_trampoline(pos->name)) {
813 * These symbols are not needed anymore since the
814 * trampoline maps refer to the text section and its
815 * symbols instead. Avoid having to deal with
816 * relocations, and the assumption that the first symbol
817 * is the start of kernel text, by simply removing the
818 * symbols at this point.
821 } else if (curr_map != initial_map) {
822 char dso_name[PATH_MAX];
826 /* Kernel was relocated at boot time */
832 curr_map = initial_map;
836 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
837 snprintf(dso_name, sizeof(dso_name),
841 snprintf(dso_name, sizeof(dso_name),
845 ndso = dso__new(dso_name);
849 ndso->kernel = dso->kernel;
851 curr_map = map__new2(pos->start, ndso);
852 if (curr_map == NULL) {
857 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
858 map_groups__insert(kmaps, curr_map);
861 /* Kernel was relocated at boot time */
866 if (curr_map != initial_map) {
867 rb_erase_cached(&pos->rb_node, root);
868 symbols__insert(&curr_map->dso->symbols, pos);
875 rb_erase_cached(&pos->rb_node, root);
879 if (curr_map != initial_map &&
880 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
881 machine__is_default_guest(kmaps->machine)) {
882 dso__set_loaded(curr_map->dso);
885 return count + moved;
888 bool symbol__restricted_filename(const char *filename,
889 const char *restricted_filename)
891 bool restricted = false;
893 if (symbol_conf.kptr_restrict) {
894 char *r = realpath(filename, NULL);
897 restricted = strcmp(r, restricted_filename) == 0;
907 struct rb_node rb_node;
912 static void add_module(struct module_info *mi, struct rb_root *modules)
914 struct rb_node **p = &modules->rb_node;
915 struct rb_node *parent = NULL;
916 struct module_info *m;
920 m = rb_entry(parent, struct module_info, rb_node);
921 if (strcmp(mi->name, m->name) < 0)
926 rb_link_node(&mi->rb_node, parent, p);
927 rb_insert_color(&mi->rb_node, modules);
930 static void delete_modules(struct rb_root *modules)
932 struct module_info *mi;
933 struct rb_node *next = rb_first(modules);
936 mi = rb_entry(next, struct module_info, rb_node);
937 next = rb_next(&mi->rb_node);
938 rb_erase(&mi->rb_node, modules);
944 static struct module_info *find_module(const char *name,
945 struct rb_root *modules)
947 struct rb_node *n = modules->rb_node;
950 struct module_info *m;
953 m = rb_entry(n, struct module_info, rb_node);
954 cmp = strcmp(name, m->name);
966 static int __read_proc_modules(void *arg, const char *name, u64 start,
967 u64 size __maybe_unused)
969 struct rb_root *modules = arg;
970 struct module_info *mi;
972 mi = zalloc(sizeof(struct module_info));
976 mi->name = strdup(name);
984 add_module(mi, modules);
989 static int read_proc_modules(const char *filename, struct rb_root *modules)
991 if (symbol__restricted_filename(filename, "/proc/modules"))
994 if (modules__parse(filename, modules, __read_proc_modules)) {
995 delete_modules(modules);
1002 int compare_proc_modules(const char *from, const char *to)
1004 struct rb_root from_modules = RB_ROOT;
1005 struct rb_root to_modules = RB_ROOT;
1006 struct rb_node *from_node, *to_node;
1007 struct module_info *from_m, *to_m;
1010 if (read_proc_modules(from, &from_modules))
1013 if (read_proc_modules(to, &to_modules))
1014 goto out_delete_from;
1016 from_node = rb_first(&from_modules);
1017 to_node = rb_first(&to_modules);
1022 from_m = rb_entry(from_node, struct module_info, rb_node);
1023 to_m = rb_entry(to_node, struct module_info, rb_node);
1025 if (from_m->start != to_m->start ||
1026 strcmp(from_m->name, to_m->name))
1029 from_node = rb_next(from_node);
1030 to_node = rb_next(to_node);
1033 if (!from_node && !to_node)
1036 delete_modules(&to_modules);
1038 delete_modules(&from_modules);
1043 struct map *map_groups__first(struct map_groups *mg)
1045 return maps__first(&mg->maps);
1048 static int do_validate_kcore_modules(const char *filename,
1049 struct map_groups *kmaps)
1051 struct rb_root modules = RB_ROOT;
1052 struct map *old_map;
1055 err = read_proc_modules(filename, &modules);
1059 old_map = map_groups__first(kmaps);
1061 struct map *next = map_groups__next(old_map);
1062 struct module_info *mi;
1064 if (!__map__is_kmodule(old_map)) {
1069 /* Module must be in memory at the same address */
1070 mi = find_module(old_map->dso->short_name, &modules);
1071 if (!mi || mi->start != old_map->start) {
1079 delete_modules(&modules);
1084 * If kallsyms is referenced by name then we look for filename in the same
1087 static bool filename_from_kallsyms_filename(char *filename,
1088 const char *base_name,
1089 const char *kallsyms_filename)
1093 strcpy(filename, kallsyms_filename);
1094 name = strrchr(filename, '/');
1100 if (!strcmp(name, "kallsyms")) {
1101 strcpy(name, base_name);
1108 static int validate_kcore_modules(const char *kallsyms_filename,
1111 struct map_groups *kmaps = map__kmaps(map);
1112 char modules_filename[PATH_MAX];
1117 if (!filename_from_kallsyms_filename(modules_filename, "modules",
1121 if (do_validate_kcore_modules(modules_filename, kmaps))
1127 static int validate_kcore_addresses(const char *kallsyms_filename,
1130 struct kmap *kmap = map__kmap(map);
1135 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1138 if (kallsyms__get_function_start(kallsyms_filename,
1139 kmap->ref_reloc_sym->name, &start))
1141 if (start != kmap->ref_reloc_sym->addr)
1145 return validate_kcore_modules(kallsyms_filename, map);
1148 struct kcore_mapfn_data {
1150 struct list_head maps;
1153 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1155 struct kcore_mapfn_data *md = data;
1158 map = map__new2(start, md->dso);
1162 map->end = map->start + len;
1165 list_add(&map->node, &md->maps);
1171 * Merges map into map_groups by splitting the new map
1172 * within the existing map regions.
1174 int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
1176 struct map *old_map;
1179 for (old_map = map_groups__first(kmaps); old_map;
1180 old_map = map_groups__next(old_map)) {
1182 /* no overlap with this one */
1183 if (new_map->end < old_map->start ||
1184 new_map->start >= old_map->end)
1187 if (new_map->start < old_map->start) {
1192 if (new_map->end < old_map->end) {
1194 * |new......| -> |new..|
1195 * |old....| -> |old....|
1197 new_map->end = old_map->start;
1200 * |new.............| -> |new..| |new..|
1201 * |old....| -> |old....|
1203 struct map *m = map__clone(new_map);
1208 m->end = old_map->start;
1209 list_add_tail(&m->node, &merged);
1210 new_map->start = old_map->end;
1217 if (new_map->end < old_map->end) {
1220 * |old.........| -> |old.........|
1227 * |new......| -> |new...|
1228 * |old....| -> |old....|
1230 new_map->start = old_map->end;
1235 while (!list_empty(&merged)) {
1236 old_map = list_entry(merged.next, struct map, node);
1237 list_del_init(&old_map->node);
1238 map_groups__insert(kmaps, old_map);
1243 map_groups__insert(kmaps, new_map);
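/*
 * dso__load_kcore() replaces the single kernel map with the maps described
 * by /proc/kcore (or a copied kcore): it validates that kallsyms still
 * matches the running modules, reads the kcore program headers into a
 * temporary list, drops the old kernel maps (keeping eBPF ones), and then
 * installs the new maps, using the map containing '_stext' as the
 * replacement for the main kernel map.
 */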
1249 static int dso__load_kcore(struct dso *dso, struct map *map,
1250 const char *kallsyms_filename)
1252 struct map_groups *kmaps = map__kmaps(map);
1253 struct kcore_mapfn_data md;
1254 struct map *old_map, *new_map, *replacement_map = NULL;
1255 struct machine *machine;
1258 char kcore_filename[PATH_MAX];
1264 machine = kmaps->machine;
1266 /* This function requires that the map is the kernel map */
1267 if (!__map__is_kernel(map))
1270 if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1274 /* Modules and kernel must be present at their original addresses */
1275 if (validate_kcore_addresses(kallsyms_filename, map))
1279 INIT_LIST_HEAD(&md.maps);
1281 fd = open(kcore_filename, O_RDONLY);
1283 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1288 /* Read new maps into temporary lists */
1289 err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1293 dso->is_64_bit = is_64_bit;
1295 if (list_empty(&md.maps)) {
1300 /* Remove old maps */
1301 old_map = map_groups__first(kmaps);
1303 struct map *next = map_groups__next(old_map);
1306 * We need to preserve eBPF maps even if they are
1307 * covered by kcore, because we need to access
1308 * eBPF dso for source data.
1310 if (old_map != map && !__map__is_bpf_prog(old_map))
1311 map_groups__remove(kmaps, old_map);
1314 machine->trampolines_mapped = false;
1316 /* Find the kernel map using the '_stext' symbol */
1317 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1318 list_for_each_entry(new_map, &md.maps, node) {
1319 if (stext >= new_map->start && stext < new_map->end) {
1320 replacement_map = new_map;
1326 if (!replacement_map)
1327 replacement_map = list_entry(md.maps.next, struct map, node);
1330 while (!list_empty(&md.maps)) {
1331 new_map = list_entry(md.maps.next, struct map, node);
1332 list_del_init(&new_map->node);
1333 if (new_map == replacement_map) {
1334 map->start = new_map->start;
1335 map->end = new_map->end;
1336 map->pgoff = new_map->pgoff;
1337 map->map_ip = new_map->map_ip;
1338 map->unmap_ip = new_map->unmap_ip;
1339 /* Ensure maps are correctly ordered */
1341 map_groups__remove(kmaps, map);
1342 map_groups__insert(kmaps, map);
1347 * Merge kcore map into existing maps,
1348 * and ensure that current maps (eBPF)
1351 if (map_groups__merge_in(kmaps, new_map))
1356 if (machine__is(machine, "x86_64")) {
1360 * If one of the corresponding symbols is there, assume the
1361 * entry trampoline maps are too.
1363 if (!kallsyms__get_function_start(kallsyms_filename,
1364 ENTRY_TRAMPOLINE_NAME,
1366 machine->trampolines_mapped = true;
1370 * Set the data type and long name so that kcore can be read via
1371 * dso__data_read_addr().
1373 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1374 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1376 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1377 dso__set_long_name(dso, strdup(kcore_filename), true);
1381 if (map->prot & PROT_EXEC)
1382 pr_debug("Using %s for kernel object code\n", kcore_filename);
1384 pr_debug("Using %s for kernel data\n", kcore_filename);
1389 while (!list_empty(&md.maps)) {
1390 map = list_entry(md.maps.next, struct map, node);
1391 list_del_init(&map->node);
1399 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
1400 * delta based on the relocation reference symbol.
1402 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1406 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1409 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1412 *delta = addr - kmap->ref_reloc_sym->addr;
1416 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1417 struct map *map, bool no_kcore)
1419 struct kmap *kmap = map__kmap(map);
1422 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1425 if (!kmap || !kmap->kmaps)
1428 if (dso__load_all_kallsyms(dso, filename) < 0)
1431 if (kallsyms__delta(kmap, filename, &delta))
1434 symbols__fixup_end(&dso->symbols);
1435 symbols__fixup_duplicate(&dso->symbols);
1437 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1438 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1440 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1442 if (!no_kcore && !dso__load_kcore(dso, map, filename))
1443 return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1445 return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1448 int dso__load_kallsyms(struct dso *dso, const char *filename,
1451 return __dso__load_kallsyms(dso, filename, map, false);
1454 static int dso__load_perf_map(const char *map_path, struct dso *dso)
1461 file = fopen(map_path, "r");
1465 while (!feof(file)) {
1470 line_len = getline(&line, &n, file);
1477 line[--line_len] = '\0'; /* \n */
1479 len = hex2u64(line, &start);
1482 if (len + 2 >= line_len)
1485 len += hex2u64(line + len, &size);
1488 if (len + 2 >= line_len)
1491 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1494 goto out_delete_line;
1496 symbols__insert(&dso->symbols, sym);
1511 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1512 enum dso_binary_type type)
1515 case DSO_BINARY_TYPE__JAVA_JIT:
1516 case DSO_BINARY_TYPE__DEBUGLINK:
1517 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1518 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1519 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1520 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1521 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1522 return !kmod && dso->kernel == DSO_TYPE_USER;
1524 case DSO_BINARY_TYPE__KALLSYMS:
1525 case DSO_BINARY_TYPE__VMLINUX:
1526 case DSO_BINARY_TYPE__KCORE:
1527 return dso->kernel == DSO_TYPE_KERNEL;
1529 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1530 case DSO_BINARY_TYPE__GUEST_VMLINUX:
1531 case DSO_BINARY_TYPE__GUEST_KCORE:
1532 return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1534 case DSO_BINARY_TYPE__GUEST_KMODULE:
1535 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1536 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1537 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1539 * kernel modules know their symtab type - it's set when
1540 * creating a module dso in machine__findnew_module_map().
1542 return kmod && dso->symtab_type == type;
1544 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1545 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1548 case DSO_BINARY_TYPE__BPF_PROG_INFO:
1549 case DSO_BINARY_TYPE__NOT_FOUND:
1555 /* Checks for the existence of the perf-<pid>.map file in two different
1556 * locations. First, if the process is in a separate mount namespace, check in
1557 * that namespace using the pid of the innermost pid namespace. If it's not in
1558 * a namespace, or the file can't be found there, try in the mount namespace of
1559 * the tracing process using our view of its pid.
1561 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1562 struct nsinfo **nsip)
1564 struct nscookie nsc;
1566 struct nsinfo *nnsi;
1571 if (nsi->need_setns) {
1572 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1573 nsinfo__mountns_enter(nsi, &nsc);
1574 rc = access(filebuf, R_OK);
1575 nsinfo__mountns_exit(&nsc);
1580 nnsi = nsinfo__copy(nsi);
1584 nnsi->need_setns = false;
1585 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
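/*
 * dso__load() is the top-level symbol loader for a dso: kernel and guest
 * kernel dsos are dispatched to their dedicated loaders, /tmp/perf-<pid>.map
 * files are parsed directly, and everything else goes through the
 * binary_type_symtab[] candidates above, ending in dso__load_sym() plus
 * synthesized PLT symbols.
 */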
1593 int dso__load(struct dso *dso, struct map *map)
1598 struct machine *machine;
1599 char *root_dir = (char *) "";
1601 struct symsrc ss_[2];
1602 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1605 unsigned char build_id[BUILD_ID_SIZE];
1606 struct nscookie nsc;
1607 char newmapname[PATH_MAX];
1608 const char *map_path = dso->long_name;
1610 perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1612 if (dso->nsinfo && (dso__find_perf_map(newmapname,
1613 sizeof(newmapname), &dso->nsinfo) == 0)) {
1614 map_path = newmapname;
1618 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1619 pthread_mutex_lock(&dso->lock);
1621 /* check again under the dso->lock */
1622 if (dso__loaded(dso)) {
1627 if (map->groups && map->groups->machine)
1628 machine = map->groups->machine;
1633 if (dso->kernel == DSO_TYPE_KERNEL)
1634 ret = dso__load_kernel_sym(dso, map);
1635 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1636 ret = dso__load_guest_kernel_sym(dso, map);
1638 if (machine__is(machine, "x86_64"))
1639 machine__map_x86_64_entry_trampolines(machine, dso);
1643 dso->adjust_symbols = 0;
1646 ret = dso__load_perf_map(map_path, dso);
1647 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1648 DSO_BINARY_TYPE__NOT_FOUND;
1653 root_dir = machine->root_dir;
1655 name = malloc(PATH_MAX);
1659 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1660 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1661 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1662 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1666 * Read the build id if possible. This is required for
1667 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1669 if (!dso->has_build_id &&
1670 is_regular_file(dso->long_name)) {
1671 __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1672 if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1673 dso__set_build_id(dso, build_id);
1677 * Iterate over candidate debug images.
1678 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1679 * and/or opd section) for processing.
1681 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1682 struct symsrc *ss = &ss_[ss_pos];
1683 bool next_slot = false;
1688 enum dso_binary_type symtab_type = binary_type_symtab[i];
1690 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1691 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1693 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1696 if (dso__read_binary_type_filename(dso, symtab_type,
1697 root_dir, name, PATH_MAX))
1701 nsinfo__mountns_exit(&nsc);
1703 is_reg = is_regular_file(name);
1705 sirc = symsrc__init(ss, dso, name, symtab_type);
1708 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1710 if (!is_reg || sirc < 0)
1713 if (!syms_ss && symsrc__has_symtab(ss)) {
1716 if (!dso->symsrc_filename)
1717 dso->symsrc_filename = strdup(name);
1720 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1728 if (syms_ss && runtime_ss)
1731 symsrc__destroy(ss);
1736 if (!runtime_ss && !syms_ss)
1739 if (runtime_ss && !syms_ss) {
1740 syms_ss = runtime_ss;
1743 /* We'll have to hope for the best */
1744 if (!runtime_ss && syms_ss)
1745 runtime_ss = syms_ss;
1748 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1755 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1760 for (; ss_pos > 0; ss_pos--)
1761 symsrc__destroy(&ss_[ss_pos - 1]);
1764 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1767 dso__set_loaded(dso);
1768 pthread_mutex_unlock(&dso->lock);
1769 nsinfo__mountns_exit(&nsc);
1774 struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1776 struct maps *maps = &mg->maps;
1778 struct rb_node *node;
1780 down_read(&maps->lock);
1782 for (node = maps->names.rb_node; node; ) {
1785 map = rb_entry(node, struct map, rb_node_name);
1787 rc = strcmp(map->dso->short_name, name);
1789 node = node->rb_left;
1791 node = node->rb_right;
1800 up_read(&maps->lock);
1804 int dso__load_vmlinux(struct dso *dso, struct map *map,
1805 const char *vmlinux, bool vmlinux_allocated)
1809 char symfs_vmlinux[PATH_MAX];
1810 enum dso_binary_type symtab_type;
1812 if (vmlinux[0] == '/')
1813 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1815 symbol__join_symfs(symfs_vmlinux, vmlinux);
1817 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1818 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1820 symtab_type = DSO_BINARY_TYPE__VMLINUX;
1822 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1825 err = dso__load_sym(dso, map, &ss, &ss, 0);
1826 symsrc__destroy(&ss);
1829 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1830 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1832 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1833 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1834 dso__set_loaded(dso);
1835 pr_debug("Using %s for symbols\n", symfs_vmlinux);
1841 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1844 char *filename = NULL;
1846 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1847 vmlinux_path__nr_entries + 1);
1849 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1850 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1855 if (!symbol_conf.ignore_vmlinux_buildid)
1856 filename = dso__build_id_filename(dso, NULL, 0, false);
1857 if (filename != NULL) {
1858 err = dso__load_vmlinux(dso, map, filename, true);
1867 static bool visible_dir_filter(const char *name, struct dirent *d)
1869 if (d->d_type != DT_DIR)
1871 return lsdir_no_dot_filter(name, d);
1874 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1876 char kallsyms_filename[PATH_MAX];
1878 struct strlist *dirs;
1879 struct str_node *nd;
1881 dirs = lsdir(dir, visible_dir_filter);
1885 strlist__for_each_entry(nd, dirs) {
1886 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1887 "%s/%s/kallsyms", dir, nd->s);
1888 if (!validate_kcore_addresses(kallsyms_filename, map)) {
1889 strlcpy(dir, kallsyms_filename, dir_sz);
1895 strlist__delete(dirs);
1901 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
1902 * since access(R_OK) only checks with real UID/GID but open() uses effective
1903 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1905 static bool filename__readable(const char *file)
1907 int fd = open(file, O_RDONLY);
1914 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1916 u8 host_build_id[BUILD_ID_SIZE];
1917 char sbuild_id[SBUILD_ID_SIZE];
1918 bool is_host = false;
1919 char path[PATH_MAX];
1921 if (!dso->has_build_id) {
1923 * Last resort, if we don't have a build-id and couldn't find
1924 * any vmlinux file, try the running kernel kallsyms table.
1929 if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1930 sizeof(host_build_id)) == 0)
1931 is_host = dso__build_id_equal(dso, host_build_id);
1933 /* Try a fast path for /proc/kallsyms if possible */
1936 * Do not check the build-id cache unless we know we cannot use
1937 * /proc/kcore, or the module maps don't match /proc/kallsyms.
1938 * To check readability of /proc/kcore, do not use access(R_OK),
1939 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
1942 if (filename__readable("/proc/kcore") &&
1943 !validate_kcore_addresses("/proc/kallsyms", map))
1947 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1949 /* Find kallsyms in build-id cache with kcore */
1950 scnprintf(path, sizeof(path), "%s/%s/%s",
1951 buildid_dir, DSO__NAME_KCORE, sbuild_id);
1953 if (!find_matching_kcore(map, path, sizeof(path)))
1954 return strdup(path);
1956 /* Use current /proc/kallsyms if possible */
1959 return strdup("/proc/kallsyms");
1962 /* Finally, find a cache of kallsyms */
1963 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
1964 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1969 return strdup(path);
1972 static int dso__load_kernel_sym(struct dso *dso, struct map *map)
1975 const char *kallsyms_filename = NULL;
1976 char *kallsyms_allocated_filename = NULL;
1978 * Step 1: if the user specified a kallsyms or vmlinux filename, use
1979 * it and only it, reporting errors to the user if it cannot be used.
1981 * For instance, try to analyse an ARM perf.data file _without_ a
1982 * build-id, or if the user specifies the wrong path to the right
1983 * vmlinux file, obviously we can't fall back to another vmlinux (an
1984 * x86_64 one, on the machine where analysis is being performed, say),
1985 * or worse, /proc/kallsyms.
1987 * If the specified file _has_ a build-id and there is a build-id
1988 * section in the perf.data file, we will still do the expected
1989 * validation in dso__load_vmlinux and will bail out if they don't
1992 if (symbol_conf.kallsyms_name != NULL) {
1993 kallsyms_filename = symbol_conf.kallsyms_name;
1997 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1998 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
2001 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
2002 err = dso__load_vmlinux_path(dso, map);
2007 /* do not try local files if a symfs was given */
2008 if (symbol_conf.symfs[0] != 0)
2011 kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
2012 if (!kallsyms_allocated_filename)
2015 kallsyms_filename = kallsyms_allocated_filename;
2018 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2020 pr_debug("Using %s for symbols\n", kallsyms_filename);
2021 free(kallsyms_allocated_filename);
2023 if (err > 0 && !dso__is_kcore(dso)) {
2024 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
2025 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
2026 map__fixup_start(map);
2027 map__fixup_end(map);
2033 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
2036 const char *kallsyms_filename = NULL;
2037 struct machine *machine;
2038 char path[PATH_MAX];
2041 pr_debug("Guest kernel map hasn't the point to groups\n");
2044 machine = map->groups->machine;
2046 if (machine__is_default_guest(machine)) {
2048 * If the user specified a vmlinux filename, use it and only
2049 * it, reporting errors to the user if it cannot be used.
2050 * Otherwise use the guest kallsyms file given by the user on the command line.
2052 if (symbol_conf.default_guest_vmlinux_name != NULL) {
2053 err = dso__load_vmlinux(dso, map,
2054 symbol_conf.default_guest_vmlinux_name,
2059 kallsyms_filename = symbol_conf.default_guest_kallsyms;
2060 if (!kallsyms_filename)
2063 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
2064 kallsyms_filename = path;
2067 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2069 pr_debug("Using %s for symbols\n", kallsyms_filename);
2070 if (err > 0 && !dso__is_kcore(dso)) {
2071 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
2072 dso__set_long_name(dso, machine->mmap_name, false);
2073 map__fixup_start(map);
2074 map__fixup_end(map);
2080 static void vmlinux_path__exit(void)
2082 while (--vmlinux_path__nr_entries >= 0)
2083 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
2084 vmlinux_path__nr_entries = 0;
2086 zfree(&vmlinux_path);
2089 static const char * const vmlinux_paths[] = {
2094 static const char * const vmlinux_paths_upd[] = {
2096 "/usr/lib/debug/boot/vmlinux-%s",
2097 "/lib/modules/%s/build/vmlinux",
2098 "/usr/lib/debug/lib/modules/%s/vmlinux",
2099 "/usr/lib/debug/boot/vmlinux-%s.debug"
2102 static int vmlinux_path__add(const char *new_entry)
2104 vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2105 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2107 ++vmlinux_path__nr_entries;
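/*
 * vmlinux_path__init() fills vmlinux_path[] with the fixed candidates plus
 * the kernel-version templates above, e.g. (hypothetical release string)
 * "/usr/lib/debug/boot/vmlinux-5.4.0-90-generic" for a 5.4.0-90-generic
 * kernel. The release comes from perf_env when available, otherwise from
 * uname(2).
 */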
2112 static int vmlinux_path__init(struct perf_env *env)
2116 char *kernel_version;
2119 vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2120 ARRAY_SIZE(vmlinux_paths_upd)));
2121 if (vmlinux_path == NULL)
2124 for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2125 if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2128 /* only try kernel version if no symfs was given */
2129 if (symbol_conf.symfs[0] != 0)
2133 kernel_version = env->os_release;
2135 if (uname(&uts) < 0)
2138 kernel_version = uts.release;
2141 for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2142 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2143 if (vmlinux_path__add(bf) < 0)
2150 vmlinux_path__exit();
2154 int setup_list(struct strlist **list, const char *list_str,
2155 const char *list_name)
2157 if (list_str == NULL)
2160 *list = strlist__new(list_str, NULL);
2162 pr_err("problems parsing %s list\n", list_name);
2166 symbol_conf.has_filter = true;
2170 int setup_intlist(struct intlist **list, const char *list_str,
2171 const char *list_name)
2173 if (list_str == NULL)
2176 *list = intlist__new(list_str);
2178 pr_err("problems parsing %s list\n", list_name);
2184 static bool symbol__read_kptr_restrict(void)
2187 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2192 if (fgets(line, sizeof(line), fp) != NULL)
2193 value = ((geteuid() != 0) || (getuid() != 0)) ?
2203 int symbol__annotation_init(void)
2205 if (symbol_conf.init_annotation)
2208 if (symbol_conf.initialized) {
2209 pr_err("Annotation needs to be init before symbol__init()\n");
2213 symbol_conf.priv_size += sizeof(struct annotation);
2214 symbol_conf.init_annotation = true;
2218 int symbol__init(struct perf_env *env)
2222 if (symbol_conf.initialized)
2225 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2229 if (symbol_conf.sort_by_name)
2230 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2231 sizeof(struct symbol));
2233 if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2236 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2237 pr_err("'.' is the only non valid --field-separator argument\n");
2241 if (setup_list(&symbol_conf.dso_list,
2242 symbol_conf.dso_list_str, "dso") < 0)
2245 if (setup_list(&symbol_conf.comm_list,
2246 symbol_conf.comm_list_str, "comm") < 0)
2247 goto out_free_dso_list;
2249 if (setup_intlist(&symbol_conf.pid_list,
2250 symbol_conf.pid_list_str, "pid") < 0)
2251 goto out_free_comm_list;
2253 if (setup_intlist(&symbol_conf.tid_list,
2254 symbol_conf.tid_list_str, "tid") < 0)
2255 goto out_free_pid_list;
2257 if (setup_list(&symbol_conf.sym_list,
2258 symbol_conf.sym_list_str, "symbol") < 0)
2259 goto out_free_tid_list;
2261 if (setup_list(&symbol_conf.bt_stop_list,
2262 symbol_conf.bt_stop_list_str, "symbol") < 0)
2263 goto out_free_sym_list;
2266 * A symfs path of "/" is identical to "";
2267 * reset it here for simplicity.
2269 symfs = realpath(symbol_conf.symfs, NULL);
2271 symfs = symbol_conf.symfs;
2272 if (strcmp(symfs, "/") == 0)
2273 symbol_conf.symfs = "";
2274 if (symfs != symbol_conf.symfs)
2275 free((void *)symfs);
2277 symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2279 symbol_conf.initialized = true;
2283 strlist__delete(symbol_conf.sym_list);
2285 intlist__delete(symbol_conf.tid_list);
2287 intlist__delete(symbol_conf.pid_list);
2289 strlist__delete(symbol_conf.comm_list);
2291 strlist__delete(symbol_conf.dso_list);
2295 void symbol__exit(void)
2297 if (!symbol_conf.initialized)
2299 strlist__delete(symbol_conf.bt_stop_list);
2300 strlist__delete(symbol_conf.sym_list);
2301 strlist__delete(symbol_conf.dso_list);
2302 strlist__delete(symbol_conf.comm_list);
2303 intlist__delete(symbol_conf.tid_list);
2304 intlist__delete(symbol_conf.pid_list);
2305 vmlinux_path__exit();
2306 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2307 symbol_conf.bt_stop_list = NULL;
2308 symbol_conf.initialized = false;
2311 int symbol__config_symfs(const struct option *opt __maybe_unused,
2312 const char *dir, int unset __maybe_unused)
2317 symbol_conf.symfs = strdup(dir);
2318 if (symbol_conf.symfs == NULL)
2321 /* Skip the locally configured cache if a symfs is given, and
2322 * set the buildid dir to symfs/.debug
2324 ret = asprintf(&bf, "%s/%s", dir, ".debug");
2328 set_buildid_dir(bf);
2334 struct mem_info *mem_info__get(struct mem_info *mi)
2337 refcount_inc(&mi->refcnt);
2341 void mem_info__put(struct mem_info *mi)
2343 if (mi && refcount_dec_and_test(&mi->refcnt))
2347 struct mem_info *mem_info__new(void)
2349 struct mem_info *mi = zalloc(sizeof(*mi));
2352 refcount_set(&mi->refcnt, 1);
2356 struct block_info *block_info__get(struct block_info *bi)
2359 refcount_inc(&bi->refcnt);
2363 void block_info__put(struct block_info *bi)
2365 if (bi && refcount_dec_and_test(&bi->refcnt))
2369 struct block_info *block_info__new(void)
2371 struct block_info *bi = zalloc(sizeof(*bi));
2374 refcount_set(&bi->refcnt, 1);