// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */

#include <linux/err.h>
#include <linux/btf.h>
#include "libbpf_internal.h"

#define BTF_MAX_NR_TYPES 0x7fffffff
#define BTF_MAX_STR_OFFSET 0x7fffffff

static struct btf_type btf_void;

	struct btf_header *hdr;
	struct btf_type **types;

static inline __u64 ptr_to_u64(const void *ptr)
	return (__u64) (unsigned long) ptr;

static int btf_add_type(struct btf *btf, struct btf_type *t)
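	/* the types array keeps slot 0 reserved for the implicit "void"
	 * type (see below), so growing must leave room for both it and
	 * the new type, hence the < 2 check
	 */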
	if (btf->types_size - btf->nr_types < 2) {
		struct btf_type **new_types;
		__u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_NR_TYPES)
			return -E2BIG;

		expand_by = max(btf->types_size >> 2, 16);
		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);

		new_types = realloc(btf->types, sizeof(*new_types) * new_size);

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;

		btf->types = new_types;
		btf->types_size = new_size;

	btf->types[++(btf->nr_types)] = t;
static int btf_parse_hdr(struct btf *btf)
	const struct btf_header *hdr = btf->hdr;

	if (btf->data_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic:%x\n", hdr->magic);

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF version:%u\n", hdr->version);

		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);

	meta_left = btf->data_size - sizeof(*hdr);
		pr_debug("BTF has no data\n");

	if (meta_left < hdr->type_off) {
		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);

	if (meta_left < hdr->str_off) {
		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);

	if (hdr->type_off >= hdr->str_off) {
		pr_debug("BTF type section offset >= string section offset. No type?\n");

	if (hdr->type_off & 0x03) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");

	btf->nohdr_data = btf->hdr + 1;
static int btf_parse_str_sec(struct btf *btf)
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->nohdr_data + hdr->str_off;
	const char *end = start + btf->hdr->str_len;
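	/* a valid string section is framed by '\0' bytes: it must begin
	 * with an empty string and its last string must be null-terminated
	 */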
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
	    start[0] || end[-1]) {
		pr_debug("Invalid BTF string section\n");

	btf->strings = start;
static int btf_type_size(struct btf_type *t)
	int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
static int btf_parse_type_sec(struct btf *btf)
	struct btf_header *hdr = btf->hdr;
	void *nohdr_data = btf->nohdr_data;
	void *next_type = nohdr_data + hdr->type_off;
	void *end_type = nohdr_data + hdr->str_off;

	while (next_type < end_type) {
		struct btf_type *t = next_type;

		type_size = btf_type_size(t);
		next_type += type_size;
		err = btf_add_type(btf, t);

__u32 btf__get_nr_types(const struct btf *btf)
	return btf->nr_types;

const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];

static bool btf_type_is_void(const struct btf_type *t)
	return t == &btf_void || btf_is_fwd(t);

static bool btf_type_is_void_or_null(const struct btf_type *t)
	return !t || btf_type_is_void(t);
#define MAX_RESOLVE_DEPTH 32

__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
	const struct btf_array *array;
	const struct btf_type *t;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
		switch (btf_kind(t)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_DATASEC:
			size = sizeof(void *);
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return -E2BIG;
			nelems *= array->nelems;
			type_id = array->type;

		t = btf__type_by_id(btf, type_id);

	if (nelems && size > UINT32_MAX / nelems)
		return -E2BIG;

	return nelems * size;
int btf__resolve_type(const struct btf *btf, __u32 type_id)
	const struct btf_type *t;

	t = btf__type_by_id(btf, type_id);
	while (depth < MAX_RESOLVE_DEPTH &&
	       !btf_type_is_void_or_null(t) &&
	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
		t = btf__type_by_id(btf, type_id);

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
	if (!strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t = btf->types[i];
		const char *name = btf__name_by_offset(btf, t->name_off);

		if (name && !strcmp(type_name, name))
			return i;

__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
		return 0;

	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t = btf->types[i];

		if (btf_kind(t) != kind)
			continue;
		name = btf__name_by_offset(btf, t->name_off);
		if (name && !strcmp(type_name, name))
			return i;
void btf__free(struct btf *btf)

struct btf *btf__new(__u8 *data, __u32 size)
	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->data = malloc(size);

	memcpy(btf->data, data, size);
	btf->data_size = size;

	err = btf_parse_hdr(btf);

	err = btf_parse_str_sec(btf);

	err = btf_parse_type_sec(btf);
static bool btf_check_endianness(const GElf_Ehdr *ehdr)
#if __BYTE_ORDER == __LITTLE_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER"
#endif
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
	int err = 0, fd = -1, idx = 0;
	struct btf *btf = NULL;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);

	fd = open(path, O_RDONLY);
		pr_warn("failed to open %s: %s\n", path, strerror(errno));

	err = -LIBBPF_ERRNO__FORMAT;

	elf = elf_begin(fd, ELF_C_READ, NULL);
		pr_warn("failed to open %s as ELF file\n", path);

	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warn("failed to get EHDR from %s\n", path);

	if (!btf_check_endianness(&ehdr)) {
		pr_warn("non-native ELF endianness is not supported\n");

	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
		pr_warn("failed to get e_shstrndx from %s\n", path);

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warn("failed to get section(%d) header from %s\n",

		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
			pr_warn("failed to get section(%d) name from %s\n",

		if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = elf_getdata(scn, 0);
				pr_warn("failed to get section(%d, %s) data from %s\n",

		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = elf_getdata(scn, 0);
				pr_warn("failed to get section(%d, %s) data from %s\n",

	btf = btf__new(btf_data->d_buf, btf_data->d_size);

	if (btf_ext && btf_ext_data) {
		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
					btf_ext_data->d_size);
		if (IS_ERR(*btf_ext))
	} else if (btf_ext) {

	/*
	 * btf is always parsed before btf_ext, so there is no need to clean
	 * up btf_ext if btf loading failed
	 */
	if (btf_ext && IS_ERR(*btf_ext)) {
		err = PTR_ERR(*btf_ext);
static int compare_vsi_off(const void *_a, const void *_b)
	const struct btf_var_secinfo *a = _a;
	const struct btf_var_secinfo *b = _b;

	return a->offset - b->offset;

static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
	const char *name = btf__name_by_offset(btf, t->name_off);
	const struct btf_type *t_var;
	struct btf_var_secinfo *vsi;
	const struct btf_var *var;

		pr_debug("No name found in string section for DATASEC kind.\n");

	ret = bpf_object__section_size(obj, name, &size);
	if (ret || !size || (t->size && t->size != size)) {
		pr_debug("Invalid size for section %s: %u bytes\n", name, size);

	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
		t_var = btf__type_by_id(btf, vsi->type);
		var = btf_var(t_var);

		if (!btf_is_var(t_var)) {
			pr_debug("Non-VAR type seen in section %s\n", name);

		if (var->linkage == BTF_VAR_STATIC)
			continue;

		name = btf__name_by_offset(btf, t_var->name_off);
			pr_debug("No name found in string section for VAR kind\n");

		ret = bpf_object__variable_offset(obj, name, &off);
			pr_debug("No offset found in symbol table for VAR %s\n",
	qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off);

int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = btf->types[i];

		/* Loader needs to fix up some of the things the compiler
		 * couldn't get its hands on while emitting BTF. These are
		 * section sizes and global variable offsets. We use the
		 * info from the ELF itself for this purpose.
		 */
		if (btf_is_datasec(t)) {
			err = btf_fixup_datasec(obj, btf, t);

int btf__load(struct btf *btf)
	__u32 log_buf_size = BPF_LOG_BUF_SIZE;
	char *log_buf = NULL;

	log_buf = malloc(log_buf_size);

	btf->fd = bpf_load_btf(btf->data, btf->data_size,
			       log_buf, log_buf_size, false);
		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
		pr_warn("%s\n", log_buf);
int btf__fd(const struct btf *btf)

const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
	*size = btf->data_size;

const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
	if (offset < btf->hdr->str_len)
		return &btf->strings[offset];

int btf__get_from_id(__u32 id, struct btf **btf)
	struct bpf_btf_info btf_info = { 0 };
	__u32 len = sizeof(btf_info);

	btf_fd = bpf_btf_get_fd_by_id(id);

	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
	 */
	btf_info.btf_size = 4096;
	last_size = btf_info.btf_size;
	ptr = malloc(last_size);

	memset(ptr, 0, last_size);
	btf_info.btf = ptr_to_u64(ptr);
	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);

		memset(ptr, 0, last_size);
		btf_info.btf = ptr_to_u64(ptr);
		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);

	if (err || btf_info.btf_size > last_size) {

	*btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
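/*
 * btf__get_map_kv_tids() below looks up key/value types via a
 * compiler-emitted container struct. For a map named "my_map",
 * BPF_ANNOTATE_KV_PAIR would emit roughly the following (a sketch;
 * "my_key"/"my_value" are placeholder type names):
 *
 *	struct ____btf_map_my_map {
 *		struct my_key key;
 *		struct my_value value;
 *	};
 */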
int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
			 __u32 expected_key_size, __u32 expected_value_size,
			 __u32 *key_type_id, __u32 *value_type_id)
	const struct btf_type *container_type;
	const struct btf_member *key, *value;
	const size_t max_name = 256;
	char container_name[max_name];
	__s64 key_size, value_size;

	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
		pr_warn("map:%s length of '____btf_map_%s' is too long\n",

	container_id = btf__find_by_name(btf, container_name);
	if (container_id < 0) {
		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
			 map_name, container_name);

	container_type = btf__type_by_id(btf, container_id);
	if (!container_type) {
		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
			map_name, container_id);

	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
		pr_warn("map:%s container_name:%s is an invalid container struct\n",
			map_name, container_name);

	key = btf_members(container_type);
	value = key + 1;

	key_size = btf__resolve_size(btf, key->type);
		pr_warn("map:%s invalid BTF key_type_size\n", map_name);

	if (expected_key_size != key_size) {
		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
			map_name, (__u32)key_size, expected_key_size);

	value_size = btf__resolve_size(btf, value->type);
	if (value_size < 0) {
		pr_warn("map:%s invalid BTF value_type_size\n", map_name);

	if (expected_value_size != value_size) {
		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
			map_name, (__u32)value_size, expected_value_size);

	*key_type_id = key->type;
	*value_type_id = value->type;
struct btf_ext_sec_setup_param {
	struct btf_ext_info *ext_info;

static int btf_ext_setup_info(struct btf_ext *btf_ext,
			      struct btf_ext_sec_setup_param *ext_sec)
	const struct btf_ext_info_sec *sinfo;
	struct btf_ext_info *ext_info;
	__u32 info_left, record_size;
	/* The start of the info sec (including the __u32 record_size). */
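	/*
	 * Expected layout of each info section, as enforced by the checks
	 * below (a summary, inferred from this function): a leading __u32
	 * record_size, followed by one or more btf_ext_info_sec headers,
	 * each trailed by num_info records of record_size bytes.
	 */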
	if (ext_sec->len == 0)
		return 0;

	if (ext_sec->off & 0x03) {
		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",

	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
	info_left = ext_sec->len;

	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
			 ext_sec->desc, ext_sec->off, ext_sec->len);

	/* At least a record size */
	if (info_left < sizeof(__u32)) {
		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);

	/* The record size needs to meet the minimum standard */
	record_size = *(__u32 *)info;
	if (record_size < ext_sec->min_rec_size ||
	    record_size & 0x03) {
		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
			 ext_sec->desc, record_size);

	sinfo = info + sizeof(__u32);
	info_left -= sizeof(__u32);

	/* If no records, return failure now so .BTF.ext won't be used. */
		pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);

		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
		__u64 total_record_size;

		if (info_left < sec_hdrlen) {
			pr_debug("%s section header is not found in .BTF.ext\n",

		num_records = sinfo->num_info;
		if (num_records == 0) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",

		total_record_size = sec_hdrlen +
				    (__u64)num_records * record_size;
		if (info_left < total_record_size) {
			pr_debug("%s section has incorrect num_records in .BTF.ext\n",

		info_left -= total_record_size;
		sinfo = (void *)sinfo + total_record_size;

	ext_info = ext_sec->ext_info;
	ext_info->len = ext_sec->len - sizeof(__u32);
	ext_info->rec_size = record_size;
	ext_info->info = info + sizeof(__u32);
static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->func_info_off,
		.len = btf_ext->hdr->func_info_len,
		.min_rec_size = sizeof(struct bpf_func_info_min),
		.ext_info = &btf_ext->func_info,

	return btf_ext_setup_info(btf_ext, &param);

static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->line_info_off,
		.len = btf_ext->hdr->line_info_len,
		.min_rec_size = sizeof(struct bpf_line_info_min),
		.ext_info = &btf_ext->line_info,

	return btf_ext_setup_info(btf_ext, &param);

static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
	struct btf_ext_sec_setup_param param = {
		.off = btf_ext->hdr->field_reloc_off,
		.len = btf_ext->hdr->field_reloc_len,
		.min_rec_size = sizeof(struct bpf_field_reloc),
		.ext_info = &btf_ext->field_reloc_info,
		.desc = "field_reloc",

	return btf_ext_setup_info(btf_ext, &param);
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;

	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
	    data_size < hdr->hdr_len) {
		pr_debug("BTF.ext header not found\n");

	if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);

	if (hdr->version != BTF_VERSION) {
		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);

		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);

	if (data_size == hdr->hdr_len) {
		pr_debug("BTF.ext has no data\n");
void btf_ext__free(struct btf_ext *btf_ext)

struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
	struct btf_ext *btf_ext;

	err = btf_ext_parse_hdr(data, size);

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return ERR_PTR(-ENOMEM);

	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {

	memcpy(btf_ext->data, data, size);

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, line_info_len))
		goto done;
	err = btf_ext_setup_func_info(btf_ext);

	err = btf_ext_setup_line_info(btf_ext);

	if (btf_ext->hdr->hdr_len <
	    offsetofend(struct btf_ext_header, field_reloc_len))
		goto done;
	err = btf_ext_setup_field_reloc(btf_ext);

	btf_ext__free(btf_ext);
	return ERR_PTR(err);

const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
	*size = btf_ext->data_size;
	return btf_ext->data;
static int btf_ext_reloc_info(const struct btf *btf,
			      const struct btf_ext_info *ext_info,
			      const char *sec_name, __u32 insns_cnt,
			      void **info, __u32 *cnt)
	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
	__u32 i, record_size, existing_len, records_len;
	struct btf_ext_info_sec *sinfo;
	const char *info_sec_name;

	record_size = ext_info->rec_size;
	sinfo = ext_info->info;
	remain_len = ext_info->len;
	while (remain_len > 0) {
		records_len = sinfo->num_info * record_size;
		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
		if (strcmp(info_sec_name, sec_name)) {
			remain_len -= sec_hdrlen + records_len;
			sinfo = (void *)sinfo + sec_hdrlen + records_len;
			continue;

		existing_len = (*cnt) * record_size;
		data = realloc(*info, existing_len + records_len);

		memcpy(data + existing_len, sinfo->data, records_len);
		/* adjust insn_off only, the rest of the data will be passed
		 * to the kernel
		 */
		for (i = 0; i < sinfo->num_info; i++) {
			__u32 *insn_off;

			insn_off = data + existing_len + (i * record_size);
			*insn_off = *insn_off / sizeof(struct bpf_insn) +
				    insns_cnt;

		*cnt += sinfo->num_info;

int btf_ext__reloc_func_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **func_info, __u32 *cnt)
	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
				  insns_cnt, func_info, cnt);

int btf_ext__reloc_line_info(const struct btf *btf,
			     const struct btf_ext *btf_ext,
			     const char *sec_name, __u32 insns_cnt,
			     void **line_info, __u32 *cnt)
	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
				  insns_cnt, line_info, cnt);

__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
	return btf_ext->func_info.rec_size;

__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
	return btf_ext->line_info.rec_size;
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);
static int btf_dedup_strings(struct btf_dedup *d);
static int btf_dedup_prim_types(struct btf_dedup *d);
static int btf_dedup_struct_types(struct btf_dedup *d);
static int btf_dedup_ref_types(struct btf_dedup *d);
static int btf_dedup_compact_types(struct btf_dedup *d);
static int btf_dedup_remap_types(struct btf_dedup *d);
/*
 * Deduplicate BTF types and strings.
 *
 * The BTF dedup algorithm takes as input a `struct btf` representing the
 * `.BTF` ELF section with all BTF type descriptors and string data. It
 * overwrites that memory in-place with deduplicated types and strings without
 * any loss of information. If an optional `struct btf_ext` representing the
 * `.BTF.ext` ELF section is provided, all the strings referenced from the
 * .BTF.ext section are honored and updated to point to the right offsets
 * after deduplication.
 *
 * If the function returns an error, type/string data might be garbled and
 * should be discarded.
 *
 * A more verbose and detailed description of both the problem btf_dedup is
 * solving and its solution can be found at:
 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
 *
 * Problem description and justification
 * =====================================
 *
 * BTF type information is typically emitted either as a result of conversion
 * from DWARF to BTF or directly by the compiler. In both cases, each
 * compilation unit contains information about a subset of all the types that
 * are used in an application. These subsets are frequently overlapping and
 * contain a lot of duplicated information when later concatenated together
 * into a single binary. This algorithm ensures that each unique type is
 * represented by a single BTF type descriptor, greatly reducing the resulting
 * size of BTF data.
 *
 * Compilation unit isolation and the subsequent duplication of data are not
 * the only problem. The same type hierarchy (e.g., a struct and all the types
 * that the struct references) in different compilation units can be
 * represented in BTF to various degrees of completeness (or, rather,
 * incompleteness) due to struct/union forward declarations.
 *
 * Let's take a look at an example that we'll use to better understand the
 * problem (and solution). Suppose we have two compilation units, each using
 * the same `struct S`, but each of them having incomplete type information
 * about it (a sketch of the two CUs follows; field names are illustrative):
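 *
 * // CU #1:
 * struct S;
 * struct A {
 *	int a;
 *	struct A *self;
 *	struct S *parent;
 * };
 * struct B;
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };
 *
 * // CU #2:
 * struct S;
 * struct A;
 * struct B {
 *	int b;
 *	struct B *self;
 *	struct S *parent;
 * };
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };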
 * In case of CU #1, BTF data will know only that `struct B` exists (but no
 * more), but will know the complete type information about `struct A`. While
 * for CU #2, it will know full type information about `struct B`, but will
 * only know about the forward declaration of `struct A` (in BTF terms, it
 * will have a `BTF_KIND_FWD` type descriptor with name `A`).
 *
 * This compilation unit isolation means that it's possible that there is no
 * single CU with complete type information describing structs `S`, `A`, and
 * `B`. Also, we might get tons of duplicated and redundant type information.
 *
 * An additional complication we need to keep in mind comes from the fact that
 * types, in general, can form graphs containing cycles, not just DAGs.
 *
 * While the algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever
 * possible. E.g., in the example above with two compilation units having
 * partial type information for structs `A` and `B`, the output of the
 * algorithm will emit a single copy of each BTF type that describes structs
 * `A`, `B`, and `S` (as well as type information for `int` and pointers), as
 * if they were defined in a single compilation unit as:
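 *
 * (a reconstruction of the merged result, using the same illustrative fields)
 *
 * struct A {
 *	int a;
 *	struct A *self;
 *	struct S *parent;
 * };
 * struct B {
 *	int b;
 *	struct B *self;
 *	struct S *parent;
 * };
 * struct S {
 *	struct A *a_ptr;
 *	struct B *b_ptr;
 * };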
 * The algorithm completes its work in 6 separate passes:
 *
 * 1. Strings deduplication.
 * 2. Primitive types deduplication (int, enum, fwd).
 * 3. Struct/union types deduplication.
 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
 *    protos, and const/volatile/restrict modifiers).
 * 5. Types compaction.
 * 6. Types remapping.
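 *
 * E.g., a minimal invocation of the whole pipeline (a sketch; `btf` and
 * `btf_ext` are assumed to come from btf__parse_elf(), and error handling is
 * reduced to logging):
 *
 *	err = btf__dedup(btf, btf_ext, NULL);
 *	if (err)
 *		pr_debug("btf__dedup failed:%d\n", err);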
 *
 * The algorithm determines a canonical type descriptor, which is a single
 * representative type for each truly unique type. This canonical type is the
 * one that will go into the final deduplicated BTF type information. For
 * struct/unions, it is also the type that the algorithm will merge additional
 * type information into (while resolving FWDs), as it discovers it from data
 * in other CUs. Each input BTF type eventually gets either mapped to itself,
 * if that type is canonical, or to some other type, if that type is
 * equivalent and was chosen as the canonical representative. This mapping is
 * stored in the `btf_dedup->map` array. This map is also used to record the
 * STRUCT/UNION that a FWD type got resolved to.
 *
 * To facilitate fast discovery of canonical types, we also maintain a
 * canonical index (`btf_dedup->dedup_table`), which maps a type descriptor's
 * signature hash (i.e., hashed kind, name, size, fields, etc) into a list of
 * canonical types that match that signature. With a sufficiently good choice
 * of type signature hashing function, we can limit the number of canonical
 * types for each unique type signature to a very small number, allowing us to
 * find the canonical type for any duplicated type very quickly.
 *
 * Struct/union deduplication is the most critical part, and the algorithm for
 * deduplicating structs/unions is described in greater detail in the comments
 * for the `btf_dedup_is_equiv` function.
 */
int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
	       const struct btf_dedup_opts *opts)
	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);

		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));

	err = btf_dedup_strings(d);
		pr_debug("btf_dedup_strings failed:%d\n", err);

	err = btf_dedup_prim_types(d);
		pr_debug("btf_dedup_prim_types failed:%d\n", err);

	err = btf_dedup_struct_types(d);
		pr_debug("btf_dedup_struct_types failed:%d\n", err);

	err = btf_dedup_ref_types(d);
		pr_debug("btf_dedup_ref_types failed:%d\n", err);

	err = btf_dedup_compact_types(d);
		pr_debug("btf_dedup_compact_types failed:%d\n", err);

	err = btf_dedup_remap_types(d);
		pr_debug("btf_dedup_remap_types failed:%d\n", err);
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)

	/* .BTF section to be deduped in-place */
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share the same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	/* Hypothetical mapping, used during type graph equivalence checks */
	/* Various options modifying the behavior of the algorithm */
	struct btf_dedup_opts opts;

struct btf_str_ptr {

struct btf_str_ptrs {
	struct btf_str_ptr *ptrs;
static long hash_combine(long h, long value)
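	/* a simple multiplicative hash step (h * 31 + value), used to fold
	 * successive type fields into one signature hash
	 */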
	return h * 31 + value;

#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)

static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
	return hashmap__append(d->dedup_table,
			       (void *)hash, (void *)(long)type_id);

static int btf_dedup_hypot_map_add(struct btf_dedup *d,
				   __u32 from_id, __u32 to_id)
	if (d->hypot_cnt == d->hypot_cap) {
		d->hypot_cap += max(16, d->hypot_cap / 2);
		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
		d->hypot_list = new_list;

	d->hypot_list[d->hypot_cnt++] = from_id;
	d->hypot_map[from_id] = to_id;

static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
	for (i = 0; i < d->hypot_cnt; i++)
		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
static void btf_dedup_free(struct btf_dedup *d)
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)

static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)

static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)

static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
				       const struct btf_dedup_opts *opts)
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;

	if (!d)
		return ERR_PTR(-ENOMEM);

	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
	/* dedup_table_size is now used only to force collisions in tests */
	if (opts && opts->dedup_table_size == 1)
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf_ext = btf_ext;

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		d->dedup_table = NULL;

	d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));

	/* special BTF "void" type is made canonical immediately */
	for (i = 1; i <= btf->nr_types; i++) {
		struct btf_type *t = d->btf->types[i];

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;

	d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
	if (!d->hypot_map) {

	for (i = 0; i <= btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

	return ERR_PTR(err);
typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);

/*
 * Iterate over all possible places in .BTF and .BTF.ext that can reference
 * a string and pass a pointer to it to the provided callback `fn`.
 */
static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
	void *line_data_cur, *line_data_end;
	int i, j, r, rec_size;

	for (i = 1; i <= d->btf->nr_types; i++) {
		t = d->btf->types[i];
		r = fn(&t->name_off, ctx);

		switch (btf_kind(t)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION: {
			struct btf_member *m = btf_members(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);

		case BTF_KIND_ENUM: {
			struct btf_enum *m = btf_enum(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);

		case BTF_KIND_FUNC_PROTO: {
			struct btf_param *m = btf_params(t);
			__u16 vlen = btf_vlen(t);

			for (j = 0; j < vlen; j++) {
				r = fn(&m->name_off, ctx);

	line_data_cur = d->btf_ext->line_info.info;
	line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
	rec_size = d->btf_ext->line_info.rec_size;

	while (line_data_cur < line_data_end) {
		struct btf_ext_info_sec *sec = line_data_cur;
		struct bpf_line_info_min *line_info;
		__u32 num_info = sec->num_info;

		r = fn(&sec->sec_name_off, ctx);

		line_data_cur += sizeof(struct btf_ext_info_sec);
		for (i = 0; i < num_info; i++) {
			line_info = line_data_cur;
			r = fn(&line_info->file_name_off, ctx);

			r = fn(&line_info->line_off, ctx);

			line_data_cur += rec_size;
static int str_sort_by_content(const void *a1, const void *a2)
	const struct btf_str_ptr *p1 = a1;
	const struct btf_str_ptr *p2 = a2;

	return strcmp(p1->str, p2->str);

static int str_sort_by_offset(const void *a1, const void *a2)
	const struct btf_str_ptr *p1 = a1;
	const struct btf_str_ptr *p2 = a2;

	if (p1->str != p2->str)
		return p1->str < p2->str ? -1 : 1;

static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
	const struct btf_str_ptr *p = pelem;

	if (str_ptr != p->str)
		return (const char *)str_ptr < p->str ? -1 : 1;

static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);

static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
	struct btf_str_ptrs *strs;
	struct btf_str_ptr *s;

	if (*str_off_ptr == 0)
		return 0;

	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);

	*str_off_ptr = s->new_off;

/*
 * Dedup strings and filter out those that are not referenced from either .BTF
 * or .BTF.ext (if provided) sections.
 *
 * This is done by building an index of all strings in BTF's string section,
 * then iterating over all entities that can reference strings (e.g., type
 * names, struct field names, .BTF.ext line info, etc) and marking the
 * corresponding strings as used. After that, all used strings are deduped and
 * compacted into a sequential blob of memory, and new offsets are calculated.
 * Then all the string references are iterated again and rewritten using the
 * new offsets.
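 *
 * E.g., for an illustrative blob "\0int\0foo\0int\0" (two copies of "int",
 * at offsets 1 and 9), the deduped blob keeps a single "int" and a single
 * "foo", and every reference to either copy of "int" is rewritten to the
 * offset of the surviving copy.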
static int btf_dedup_strings(struct btf_dedup *d)
	const struct btf_header *hdr = d->btf->hdr;
	char *start = (char *)d->btf->nohdr_data + hdr->str_off;
	char *end = start + d->btf->hdr->str_len;
	char *p = start, *tmp_strs = NULL;
	struct btf_str_ptrs strs = {
	int i, j, err = 0, grp_idx;

	/* build index of all strings */
	while (p < end) {
		if (strs.cnt + 1 > strs.cap) {
			struct btf_str_ptr *new_ptrs;

			strs.cap += max(strs.cnt / 2, 16);
			new_ptrs = realloc(strs.ptrs,
					   sizeof(strs.ptrs[0]) * strs.cap);

			strs.ptrs = new_ptrs;

		strs.ptrs[strs.cnt].str = p;
		strs.ptrs[strs.cnt].used = false;

	/* temporary storage for deduplicated strings */
	tmp_strs = malloc(d->btf->hdr->str_len);

	/* mark all used strings */
	strs.ptrs[0].used = true;
	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);

	/* sort strings by content, so that we can identify duplicates */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);

	/*
	 * iterate groups of equal strings and if any instance in a group was
	 * referenced, emit a single instance and remember its new offset
	 */
	grp_used = strs.ptrs[0].used;
	/* iterate past the end to avoid code duplication after the loop */
	for (i = 1; i <= strs.cnt; i++) {
		/*
		 * when i == strs.cnt, we want to skip string comparison and go
		 * straight to handling the last group of strings (otherwise
		 * we'd need to handle the last group after the loop with
		 * duplicated code)
		 */
		if (i < strs.cnt &&
		    !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
			grp_used = grp_used || strs.ptrs[i].used;
			continue;

		/*
		 * this check would have been required after the loop to handle
		 * the last group of strings, but due to the <= condition in
		 * the loop we avoid that duplication
		 */
		if (grp_used) {
			int new_off = p - tmp_strs;
			__u32 len = strlen(strs.ptrs[grp_idx].str);

			memmove(p, strs.ptrs[grp_idx].str, len + 1);
			for (j = grp_idx; j < i; j++)
				strs.ptrs[j].new_off = new_off;

		grp_used = strs.ptrs[i].used;

	/* replace original strings with deduped ones */
	d->btf->hdr->str_len = p - tmp_strs;
	memmove(start, tmp_strs, d->btf->hdr->str_len);
	end = start + d->btf->hdr->str_len;

	/* restore original order for further binary search lookups */
	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);

	/* remap string offsets */
	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);

	d->btf->hdr->str_len = end - start;
static long btf_hash_common(struct btf_type *t)
	long h;

	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info);
	h = hash_combine(h, t->size);
	return h;

static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
	return t1->name_off == t2->name_off &&
	       t1->info == t2->info &&
	       t1->size == t2->size;

/* Calculate type signature hash of INT. */
static long btf_hash_int(struct btf_type *t)
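	/* INT carries one extra __u32 of encoding/bits info right after the
	 * common btf_type header, which must participate in the hash
	 */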
	__u32 info = *(__u32 *)(t + 1);
	long h;

	h = btf_hash_common(t);
	h = hash_combine(h, info);
	return h;

/* Check structural equality of two INTs. */
static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
	if (!btf_equal_common(t1, t2))
		return false;

	info1 = *(__u32 *)(t1 + 1);
	info2 = *(__u32 *)(t2 + 1);
	return info1 == info2;

/* Calculate type signature hash of ENUM. */
static long btf_hash_enum(struct btf_type *t)
	long h;

	/* don't hash vlen and enum members to support enum fwd resolving */
	h = hash_combine(0, t->name_off);
	h = hash_combine(h, t->info & ~0xffff);
	h = hash_combine(h, t->size);
	return h;

/* Check structural equality of two ENUMs. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
	const struct btf_enum *m1, *m2;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->val != m2->val)
			return false;

static inline bool btf_is_enum_fwd(struct btf_type *t)
	return btf_is_enum(t) && btf_vlen(t) == 0;

static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
		return btf_equal_enum(t1, t2);

	/* ignore vlen when comparing */
	return t1->name_off == t2->name_off &&
	       (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
	       t1->size == t2->size;
/*
 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
 * as referenced type IDs equivalence is established separately during type
 * graph equivalence check algorithm.
 */
static long btf_hash_struct(struct btf_type *t)
	const struct btf_member *member = btf_members(t);
	__u32 vlen = btf_vlen(t);
	long h = btf_hash_common(t);

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->offset);
		/* no hashing of referenced type ID, it can be unresolved yet */

/*
 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
 * type IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
	const struct btf_member *m1, *m2;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_members(t1);
	m2 = btf_members(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
			return false;

/*
 * Calculate type signature hash of ARRAY, including referenced type IDs,
 * under the assumption that they were already resolved to canonical type IDs
 * and are not going to change.
 */
static long btf_hash_array(struct btf_type *t)
	const struct btf_array *info = btf_array(t);
	long h = btf_hash_common(t);

	h = hash_combine(h, info->type);
	h = hash_combine(h, info->index_type);
	h = hash_combine(h, info->nelems);
/*
 * Check exact equality of two ARRAYs, taking into account referenced
 * type IDs, under the assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * ARRAY to a potential canonical representative.
 */
static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
	const struct btf_array *info1, *info2;

	if (!btf_equal_common(t1, t2))
		return false;

	info1 = btf_array(t1);
	info2 = btf_array(t2);
	return info1->type == info2->type &&
	       info1->index_type == info2->index_type &&
	       info1->nelems == info2->nelems;

/*
 * Check structural compatibility of two ARRAYs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
	if (!btf_equal_common(t1, t2))
		return false;

	return btf_array(t1)->nelems == btf_array(t2)->nelems;

/*
 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
 * under the assumption that they were already resolved to canonical type IDs
 * and are not going to change.
 */
static long btf_hash_fnproto(struct btf_type *t)
	const struct btf_param *member = btf_params(t);
	__u16 vlen = btf_vlen(t);
	long h = btf_hash_common(t);

	for (i = 0; i < vlen; i++) {
		h = hash_combine(h, member->name_off);
		h = hash_combine(h, member->type);

/*
 * Check exact equality of two FUNC_PROTOs, taking into account referenced
 * type IDs, under the assumption that they were already resolved to canonical
 * type IDs and are not going to change.
 * This function is called during reference types deduplication to compare
 * FUNC_PROTO to a potential canonical representative.
 */
static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
	const struct btf_param *m1, *m2;

	if (!btf_equal_common(t1, t2))
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off || m1->type != m2->type)
			return false;

/*
 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
 * IDs. This check is performed during type graph equivalence check and
 * referenced types equivalence is checked separately.
 */
static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
	const struct btf_param *m1, *m2;

	/* skip return type ID */
	if (t1->name_off != t2->name_off || t1->info != t2->info)
		return false;

	vlen = btf_vlen(t1);
	m1 = btf_params(t1);
	m2 = btf_params(t2);
	for (i = 0; i < vlen; i++) {
		if (m1->name_off != m2->name_off)
			return false;
/*
 * Deduplicate primitive types that can't reference other types, by calculating
 * their type signature hash and comparing them with any possible canonical
 * candidate. If no canonical candidate matches, the type itself is marked as
 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
 */
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
	struct btf_type *t = d->btf->types[type_id];
	struct hashmap_entry *hash_entry;
	struct btf_type *cand;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_ARRAY:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_DATASEC:
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_int(t, cand)) {
				new_id = cand_id;
				break;

	case BTF_KIND_ENUM:
		h = btf_hash_enum(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_enum(t, cand)) {
				new_id = cand_id;
				break;

			if (d->opts.dont_resolve_fwds)
				continue;
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
					new_id = cand_id;
					break;

				/* resolve canonical enum fwd to full enum */
				d->map[cand_id] = type_id;

	case BTF_KIND_FWD:
		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

static int btf_dedup_prim_types(struct btf_dedup *d)
	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_prim_type(d, i);
/*
 * Check whether a type is already mapped into a canonical one (which could be
 * to itself).
 */
static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
	return d->map[type_id] <= BTF_MAX_NR_TYPES;

/*
 * Resolve a type ID into its canonical type ID, if any; otherwise return the
 * original type ID. If the type is a FWD and is already resolved into a
 * STRUCT/UNION, follow the STRUCT/UNION link and resolve it into the
 * canonical type ID as well.
 */
static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
		type_id = d->map[type_id];
	return type_id;

/*
 * Resolve FWD to an underlying STRUCT/UNION, if any; otherwise return the
 * original type ID.
 */
static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
	__u32 orig_type_id = type_id;

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
		type_id = d->map[type_id];

	if (!btf_is_fwd(d->btf->types[type_id]))
		return type_id;

	return orig_type_id;
static inline __u16 btf_fwd_kind(struct btf_type *t)
	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
/*
 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
 * call it "candidate graph" in this description for brevity) to a type graph
 * formed by (potential) canonical struct/union ("canonical graph" for brevity
 * here, though keep in mind that not all types in canonical graph are
 * necessarily canonical representatives themselves, some of them might be
 * duplicates or their uniqueness might not have been established yet).
 * Returns:
 *  - >0, if type graphs are equivalent;
 *  -  0, if not equivalent;
 *  - <0, on error.
 *
 * The algorithm performs side-by-side DFS traversal of both type graphs and
 * checks equivalence of BTF types at each step. If at any point BTF types in
 * candidate and canonical graphs are not compatible structurally, whole
 * graphs are incompatible. If types are structurally equivalent (i.e., all
 * information except referenced type IDs is exactly the same), a mapping from
 * `canon_id` to a `cand_id` is recorded in the hypothetical mapping
 * (`btf_dedup->hypot_map`). If a type references other types, then those
 * referenced types are checked for equivalence recursively.
 *
 * During the DFS traversal, if we find that for the current `canon_id` type
 * we already have some mapping in the hypothetical map, we check for two
 * possible scenarios:
 *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
 *     happen when type graphs have cycles. In this case we assume those two
 *     types are equivalent.
 *   - `canon_id` is mapped to a different type. This is a contradiction in
 *     our hypothetical mapping, because the same graph in the canonical graph
 *     corresponds to two different types in the candidate graph, which for
 *     equivalent type graphs shouldn't happen. This condition terminates the
 *     equivalence check with a negative result.
 *
 * If the type graph traversal exhausts types to check and finds no
 * contradiction, then the type graphs are equivalent.
 *
 * When checking types for equivalence, there is one special case: FWD types.
 * If FWD type resolution is allowed and one of the types (either from the
 * canonical or the candidate graph) is FWD and the other is STRUCT/UNION
 * (depending on FWD's kind flag) and their names match, the hypothetical
 * mapping is updated to point from FWD to STRUCT/UNION. If the graphs are
 * successfully determined to be equivalent, this mapping will be used to
 * record the FWD -> STRUCT/UNION mapping permanently.
 *
 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
 * if there are two identically named (or anonymous) structs/unions that are
 * compatible structurally, one of which has a FWD field, while the other is a
 * concrete STRUCT/UNION, but according to C sources they are different
 * structs/unions that are referencing different types with the same name.
 * This is extremely unlikely to happen, but the btf_dedup API allows
 * disabling FWD resolution if this logic is causing problems.
 *
 * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
 * This is due to the fact that types within a single compilation unit are
 * always deduplicated and FWDs are already resolved, if the referenced
 * struct/union definition is available. So, if we had an unresolved FWD and
 * found a corresponding STRUCT/UNION, they will be from different compilation
 * units. This consequently means that when we "link" a FWD to the
 * corresponding STRUCT/UNION, the type graph will likely have at least two
 * different BTF types that describe the same type (e.g., most probably there
 * will be two different BTF types for the same 'int' primitive type) and
 * could even have "overlapping" parts of the type graph that describe the
 * same subset of types.
 *
 * This in turn means that our assumption that each type in the canonical
 * graph must correspond to exactly one type in the candidate graph might not
 * hold anymore and will make it harder to detect contradictions using the
 * hypothetical map. To handle this problem, we only allow following
 * FWD -> STRUCT/UNION resolution in the canonical graph. FWDs in candidate
 * graphs are never resolved. To see why it's OK, let's check all possible
 * situations w.r.t. FWDs:
 *   - Both types in canonical and candidate graphs are FWDs. If they are
 *     structurally equivalent, then they can either be both resolved to the
 *     same STRUCT/UNION or not resolved at all. In both cases they are
 *     equivalent and there is no need to resolve the FWD on the candidate
 *     side.
 *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
 *     so nothing to resolve as well; the algorithm will check equivalence
 *     anyway.
 *   - The type in the canonical graph is FWD, while the type in the candidate
 *     graph is a concrete STRUCT/UNION. In this case the candidate graph
 *     comes from a single compilation unit, so there is exactly one BTF type
 *     for each unique C type. After resolving FWD into STRUCT/UNION, there
 *     might be more than one BTF type in the canonical graph mapping to a
 *     single BTF type in the candidate graph, but because the hypothetical
 *     mapping maps from canonical to candidate types, it's alright, and we
 *     still maintain the property of having a single `canon_id` mapping to a
 *     single `cand_id` (there could be two different `canon_id` mapped to the
 *     same `cand_id`, but it's not contradictory).
 *   - The type in the canonical graph is a concrete STRUCT/UNION, while the
 *     type in the candidate graph is FWD. In this case we are just going to
 *     check compatibility of the STRUCT/UNION and the corresponding FWD, and
 *     if they are compatible, we'll assume that whatever STRUCT/UNION the FWD
 *     resolves to must be equivalent to the concrete STRUCT/UNION from the
 *     canonical graph. If the rest of the type graphs turn out equivalent,
 *     we'll re-resolve the FWD to the concrete STRUCT/UNION from the
 *     canonical graph.
 */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES)
		return hypot_type_id == cand_id;

	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = d->btf->types[cand_id];
	canon_type = d->btf->types[canon_id];
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if (!d->opts.dont_resolve_fwds
	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);

		return fwd_kind == real_kind;

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int(cand_type, canon_type);

	case BTF_KIND_ENUM:
		if (d->opts.dont_resolve_fwds)
			return btf_equal_enum(cand_type, canon_type);
		else
			return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		if (cand_type->info != canon_type->info)
			return 0;
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		eq = btf_dedup_is_equiv(d,
			cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;

		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
/*
 * Use the hypothetical mapping, produced by a successful type graph
 * equivalence check, to augment the existing struct/union canonical mapping,
 * where possible.
 *
 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
 * the FWD -> STRUCT/UNION correspondence as well. FWD resolution is
 * bidirectional: it doesn't matter if the FWD type was part of the canonical
 * graph or the candidate one, we are recording the mapping anyway. As opposed
 * to the carefulness required for struct/union correspondence mapping
 * (described below), for FWD resolution it's not important, as by the time
 * the FWD type (a reference type) is deduplicated all structs/unions will be
 * deduped already anyway.
 *
 * Recording the STRUCT/UNION mapping is purely a performance optimization and
 * is not required for correctness. It needs to be done carefully to ensure
 * that a struct/union from the candidate's type graph is not mapped into a
 * corresponding struct/union from the canonical type graph that itself hasn't
 * been resolved into a canonical representative. The only guarantee we have
 * is that the canonical struct/union was determined as canonical and that
 * won't change. But any types referenced through that struct/union's fields
 * could have been not yet resolved, so in a case like that it's too early to
 * establish any kind of correspondence between structs/unions.
 *
 * No canonical correspondence is derived for primitive types (they are
 * already deduplicated completely anyway) or reference types (they rely on
 * the stability of the struct/union canonical relationship for equivalence
 * checks).
 */
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
	__u32 cand_type_id, targ_type_id;
	__u16 t_kind, c_kind;
	__u32 t_id, c_id;
	int i;

	for (i = 0; i < d->hypot_cnt; i++) {
		cand_type_id = d->hypot_list[i];
		targ_type_id = d->hypot_map[cand_type_id];
		t_id = resolve_type_id(d, targ_type_id);
		c_id = resolve_type_id(d, cand_type_id);
		t_kind = btf_kind(d->btf->types[t_id]);
		c_kind = btf_kind(d->btf->types[c_id]);
		/*
		 * Resolve FWD into STRUCT/UNION.
		 * It's ok to resolve FWD into a STRUCT/UNION that's not yet
		 * mapped to a canonical representative (as opposed to the
		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
		 * eventually that struct is going to be mapped and all resolved
		 * FWDs will automatically resolve to the correct canonical
		 * representative. This will happen before ref type deduping,
		 * which critically depends on the stability of these mappings.
		 * This stability is not a requirement for STRUCT/UNION
		 * equivalence checks, though.
		 */
		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
			d->map[c_id] = t_id;
		else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
			d->map[t_id] = c_id;

		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
		    c_kind != BTF_KIND_FWD &&
		    is_type_mapped(d, c_id) &&
		    !is_type_mapped(d, t_id)) {
			/*
			 * As a perf optimization, we can map a struct/union
			 * that's part of the type graph we just verified for
			 * equivalence. We can only do that for a struct/union
			 * that already has a canonical representative, though.
			 */
			d->map[t_id] = c_id;
		}
	}
}
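/*
 * Illustrative example (a hedged sketch, not upstream code; `struct s` is a
 * hypothetical name). Assuming CU #1 sees only a forward declaration while
 * CU #2 has the full definition:
 *
 *   // CU #1:                     // CU #2:
 *   struct s;                     struct s { int x; };
 *   struct s *ptr;                struct s *ptr;
 *
 * a successful equivalence check leaves FWD(s) -> STRUCT(s) in hypot_map, and
 * btf_dedup_merge_hypot_map() persists it as d->map[FWD(s)] = STRUCT(s), so
 * the later reference dedup phase collapses both `struct s *` PTR types too.
 */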
/*
 * Deduplicate struct/union types.
 *
 * For each struct/union type its type signature hash is calculated, taking
 * into account the type's name, size, and the number, order and names of its
 * fields, but ignoring type IDs referenced from fields, because they might not
 * be deduped completely until after the reference types deduplication phase.
 * This type hash is used to iterate over all potential canonical types sharing
 * the same hash. For each canonical candidate we check whether the type graphs
 * they form (through referenced types in fields and so on) are equivalent
 * using the algorithm implemented in `btf_dedup_is_equiv`. If such equivalence
 * is found and BTF_KIND_FWD resolution is allowed, then the hypothetical
 * mapping (btf_dedup->hypot_map) produced by the aforementioned type graph
 * equivalence algorithm is used to record FWD -> STRUCT/UNION mappings. It's
 * also used to potentially map other structs/unions to their canonical
 * representatives, if such a relationship hasn't yet been established. This
 * speeds up the algorithm by eliminating some of the duplicate work.
 *
 * If no matching canonical representative is found, the struct/union is marked
 * as canonical for itself and is added to the btf_dedup->dedup_table hash map
 * for further lookups.
 */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = d->btf->types[type_id];
	kind = btf_kind(t);

	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = (__u32)(long)hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * against picking a matching FWD type as a dedup candidate.
		 * This can happen due to a hash collision. In such a case,
		 * relying solely on btf_dedup_is_equiv() could create a loop
		 * (FWD -> STRUCT and STRUCT -> FWD), because a FWD and a
		 * compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = d->btf->types[cand_id];
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		new_id = cand_id;
		btf_dedup_merge_hypot_map(d);
		break;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
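/*
 * Illustrative example (a hedged sketch, not upstream code) of why the struct
 * hash ignores referenced type IDs. If two CUs each contribute
 *
 *   struct list { struct list *next; };
 *
 * the two STRUCT copies reference PTR types with different type IDs, so a hash
 * over those IDs would never match. Hashing only name/size/field names puts
 * both STRUCTs into the same bucket, and btf_dedup_is_equiv() then proves the
 * two cyclic graphs equivalent, so one STRUCT becomes canonical for both.
 */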
static int btf_dedup_struct_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_struct_type(d, i);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Deduplicate reference type.
 *
 * Once all primitive and struct/union types are deduplicated, we can easily
 * deduplicate all other (reference) BTF types. This is done in two steps:
 *
 * 1. Resolve all referenced type IDs into their canonical type IDs. This
 * resolution can be done either immediately for primitive or struct/union
 * types (because they were deduped in the previous two phases) or recursively
 * for reference types. Recursion will always terminate at either a primitive
 * or a struct/union type, at which point we can "unwind" the chain of
 * reference types one by one. There is no danger of encountering cycles
 * because in the C type system the only way to form a type cycle is through a
 * struct/union, so any chain of reference types, even those taking part in a
 * type cycle, will inevitably reach a struct/union at some point.
 *
 * 2. Once all referenced type IDs are resolved into canonical ones, the BTF
 * type becomes "stable", in the sense that no further deduplication will cause
 * any changes to it. With that, it's now possible to calculate the type's
 * signature hash (this time taking into account referenced type IDs) and loop
 * over all potential canonical representatives. If no match is found, the
 * current type becomes its own canonical representative and is added to the
 * btf_dedup->dedup_table as another possible canonical representative.
 */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	/* if we don't find equivalent type, then we are representative type */
	int ref_type_id;
	long h;

	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = d->btf->types[type_id];
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = (__u32)(long)hash_entry->value;
			cand = d->btf->types[cand_id];
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
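/*
 * Illustrative example (a hedged sketch, not upstream code) of the two-step
 * reference dedup. If both CUs contain `const int *p`, each contributes a
 * PTR -> CONST -> INT chain. Resolving referenced IDs rewrites each CONST's
 * t->type to the canonical INT, so the two CONSTs hash and compare equal and
 * one becomes canonical; the same then happens one level up for the PTRs,
 * unwinding the whole chain.
 */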
static int btf_dedup_ref_types(struct btf_dedup *d)
{
	int i, err;

	for (i = 1; i <= d->btf->nr_types; i++) {
		err = btf_dedup_ref_type(d, i);
		if (err < 0)
			return err;
	}
	/* we won't need d->dedup_table anymore */
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;
	return 0;
}
/*
 * Compact types.
 *
 * Once each type's corresponding canonical representative has been
 * established, we can eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
 * duplicates. During compaction, the btf_dedup->hypot_map array is reused to
 * store a map from an original type ID to a new compacted type ID, which is
 * used during the next phase to "fix up" type IDs referenced from struct/union
 * and reference types.
 */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	struct btf_type **new_types;
	__u32 next_type_id = 1;
	char *types_start, *p;
	int i, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	for (i = 1; i <= d->btf->nr_types; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

	types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
	p = types_start;

	for (i = 1; i <= d->btf->nr_types; i++) {
		if (d->map[i] != i)
			continue;

		len = btf_type_size(d->btf->types[i]);
		if (len < 0)
			return len;

		memmove(p, d->btf->types[i], len);
		d->hypot_map[i] = next_type_id;
		d->btf->types[next_type_id] = (struct btf_type *)p;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - 1;
	d->btf->types_size = d->btf->nr_types;
	d->btf->hdr->type_len = p - types_start;
	new_types = realloc(d->btf->types,
			    (1 + d->btf->nr_types) * sizeof(struct btf_type *));
	if (!new_types)
		return -ENOMEM;
	d->btf->types = new_types;

	/* make sure string section follows type information without gaps */
	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
	memmove(p, d->btf->strings, d->btf->hdr->str_len);
	d->btf->strings = p;
	p += d->btf->hdr->str_len;

	d->btf->data_size = p - (char *)d->btf->data;
	return 0;
}
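/*
 * Illustrative example (a hedged sketch, not upstream code): if dedup left
 * canonical types at original IDs 1, 3 and 4 (ID 2 being a duplicate of ID 1),
 * compaction packs them into IDs 1, 2 and 3 and records
 *
 *   hypot_map[1] = 1, hypot_map[3] = 2, hypot_map[4] = 3;
 *
 * a reference to old ID 2 is first resolved to its canonical ID 1 and only
 * then looked up in hypot_map during the remapping phase below.
 */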
/*
 * Figure out the final (deduplicated and compacted) type ID for the provided
 * original `type_id` by first resolving it into its corresponding canonical
 * type ID and then mapping it to a deduplicated type ID, stored in
 * btf_dedup->hypot_map, which is populated during the compaction phase.
 */
static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
{
	__u32 resolved_type_id, new_type_id;

	resolved_type_id = resolve_type_id(d, type_id);
	new_type_id = d->hypot_map[resolved_type_id];
	if (new_type_id > BTF_MAX_NR_TYPES)
		return -EINVAL;
	return new_type_id;
}
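/*
 * Continuing the illustrative example above (hypothetical IDs):
 * btf_dedup_remap_type_id(d, 2) resolves old ID 2 to canonical ID 1 and
 * returns hypot_map[1] == 1, while btf_dedup_remap_type_id(d, 4) returns 3.
 */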
/*
 * Remap referenced type IDs into deduped type IDs.
 *
 * After BTF types are deduplicated and compacted, their final type IDs may
 * differ from the original ones. The map from an original type ID to its
 * corresponding deduped type ID is stored in btf_dedup->hypot_map and is
 * populated during the compaction phase. During the remapping phase we rewrite
 * all type IDs referenced from any BTF type (e.g., struct fields, func proto
 * args, etc.) to their final deduped type IDs.
 */
static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = d->btf->types[type_id];
	int i, r;

	switch (btf_kind(t)) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
		break;

	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
		r = btf_dedup_remap_type_id(d, t->type);
		if (r < 0)
			return r;
		t->type = r;
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *arr_info = btf_array(t);

		r = btf_dedup_remap_type_id(d, arr_info->type);
		if (r < 0)
			return r;
		arr_info->type = r;
		r = btf_dedup_remap_type_id(d, arr_info->index_type);
		if (r < 0)
			return r;
		arr_info->index_type = r;
		break;
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *member = btf_members(t);
		__u16 vlen = btf_vlen(t);

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, member->type);
			if (r < 0)
				return r;
			member->type = r;
			member++;
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param = btf_params(t);
		__u16 vlen = btf_vlen(t);

		r = btf_dedup_remap_type_id(d, t->type);
		if (r < 0)
			return r;
		t->type = r;

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, param->type);
			if (r < 0)
				return r;
			param->type = r;
			param++;
		}
		break;
	}

	case BTF_KIND_DATASEC: {
		struct btf_var_secinfo *var = btf_var_secinfos(t);
		__u16 vlen = btf_vlen(t);

		for (i = 0; i < vlen; i++) {
			r = btf_dedup_remap_type_id(d, var->type);
			if (r < 0)
				return r;
			var->type = r;
			var++;
		}
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}
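/*
 * Illustrative example (a hedged sketch, not upstream code): continuing the
 * running example, a STRUCT member whose member->type referenced old ID 4 is
 * rewritten in place to compacted ID 3. After this pass completes, no type
 * references a pre-dedup type ID anymore.
 */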
static int btf_dedup_remap_types(struct btf_dedup *d)
{
	int i, r;

	for (i = 1; i <= d->btf->nr_types; i++) {
		r = btf_dedup_remap_type(d, i);
		if (r < 0)
			return r;
	}
	return 0;
}