1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/types.h>
6 #include <linux/seq_file.h>
7 #include <linux/compiler.h>
8 #include <linux/errno.h>
9 #include <linux/slab.h>
10 #include <linux/anon_inodes.h>
11 #include <linux/file.h>
12 #include <linux/uaccess.h>
13 #include <linux/kernel.h>
14 #include <linux/bpf_verifier.h>
15 #include <linux/btf.h>
17 /* BTF (BPF Type Format) is the meta data format which describes
18 * the data types of BPF program/map. Hence, it basically focuses
19 * on the C programming language which the modern BPF is primary
24 * The BTF data is stored under the ".BTF" ELF section
28 * Each 'struct btf_type' object describes a C data type.
29 * Depending on the type it is describing, a 'struct btf_type'
30 * object may be followed by more data. F.e.
31 * To describe an array, 'struct btf_type' is followed by
34 * 'struct btf_type' and any extra data following it are
39 * The BTF type section contains a list of 'struct btf_type' objects.
40 * Each one describes a C type. Recall from the above section
41 * that a 'struct btf_type' object could be immediately followed by extra
42 * data in order to describe some particular C types.
46 * Each btf_type object is identified by a type_id. The type_id
47 * is implicitly implied by the location of the btf_type object in
48 * the BTF type section. The first one has type_id 1. The second
49 * one has type_id 2...etc. Hence, an earlier btf_type has
52 * A btf_type object may refer to another btf_type object by using
53 * type_id (i.e. the "type" in the "struct btf_type").
55 * NOTE that we cannot assume any reference-order.
56 * A btf_type object can refer to an earlier btf_type object
57 * but it can also refer to a later btf_type object.
59 * For example, to describe "const void *". A btf_type
60 * object describing "const" may refer to another btf_type
61 * object describing "void *". This type-reference is done
62 * by specifying type_id:
64 * [1] CONST (anon) type_id=2
65 * [2] PTR (anon) type_id=0
67 * The above is the btf_verifier debug log:
68 * - Each line started with "[?]" is a btf_type object
69 * - [?] is the type_id of the btf_type object.
70 * - CONST/PTR is the BTF_KIND_XXX
71 * - "(anon)" is the name of the type. It just
72 * happens that CONST and PTR has no name.
73 * - type_id=XXX is the 'u32 type' in btf_type
75 * NOTE: "void" has type_id 0
79 * The BTF string section contains the names used by the type section.
80 * Each string is referred by an "offset" from the beginning of the
83 * Each string is '\0' terminated.
85 * The first character in the string section must be '\0'
86 * which is used to mean 'anonymous'. Some btf_type may not
92 * To verify BTF data, two passes are needed.
96 * The first pass is to collect all btf_type objects to
97 * an array: "btf->types".
99 * Depending on the C type that a btf_type is describing,
100 * a btf_type may be followed by extra data. We don't know
101 * how many btf_type is there, and more importantly we don't
102 * know where each btf_type is located in the type section.
104 * Without knowing the location of each type_id, most verifications
105 * cannot be done. e.g. an earlier btf_type may refer to a later
106 * btf_type (recall the "const void *" above), so we cannot
107 * check this type-reference in the first pass.
109 * In the first pass, it still does some verifications (e.g.
110 * checking the name is a valid offset to the string section).
114 * The main focus is to resolve a btf_type that is referring
117 * We have to ensure the referring type:
118 * 1) does exist in the BTF (i.e. in btf->types[])
119 * 2) does not cause a loop:
128 * btf_type_needs_resolve() decides if a btf_type needs
131 * The needs_resolve type implements the "resolve()" ops which
132 * essentially does a DFS and detects backedge.
134 * During resolve (or DFS), different C types have different
135 * "RESOLVED" conditions.
137 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
138 * members because a member is always referring to another
139 * type. A struct's member can be treated as "RESOLVED" if
140 * it is referring to a BTF_KIND_PTR. Otherwise, the
141 * following valid C struct would be rejected:
148 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
149 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
150 * detect a pointer loop, e.g.:
151 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
153 * +-----------------------------------------+
/* Bit/byte conversion helpers used by the member-offset checks below. */
157 #define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
158 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
/* Non-zero iff 'bits' is not a whole number of bytes. */
159 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
160 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
/* Number of bytes needed to hold 'bits' bits, rounding up. */
161 #define BITS_ROUNDUP_BYTES(bits) \
162 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
164 /* 16MB for 64k structs and each has 16 members and
165 * a few MB spaces for the string section.
166 * The hard limit is S32_MAX.
168 #define BTF_MAX_SIZE (16 * 1024 * 1024)
169 /* 64k. We can raise it later. The hard limit is S32_MAX. */
170 #define BTF_MAX_NR_TYPES 65535
/* Iterate 'member' over all vlen members of a struct/union type. */
172 #define for_each_member(i, struct_type, member) \
173 for (i = 0, member = btf_type_member(struct_type); \
174 i < btf_type_vlen(struct_type); \
/* Same as for_each_member() but starting at member index 'from'. */
177 #define for_each_member_from(i, from, struct_type, member) \
178 for (i = from, member = btf_type_member(struct_type) + from; \
179 i < btf_type_vlen(struct_type); \
/* Fields of 'struct btf' visible here: parsed header and the array of
 * btf_type pointers indexed by type_id (types[0] is btf_void).
 */
184 struct btf_header *hdr;
187 struct btf_type **types;
/* Verification runs in phases; CHECK_META is referenced by the log code. */
198 enum verifier_phase {
/* One entry of the DFS stack used while resolving type references. */
203 struct resolve_vertex {
204 const struct btf_type *t;
/* Resolve modes: what counts as a resolution "sink" depends on the mode
 * (see env_type_is_resolve_sink()).
 */
216 RESOLVE_TBD, /* To Be Determined */
217 RESOLVE_PTR, /* Resolving for Pointer */
218 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
/* Maximum DFS depth when chasing type references. */
223 #define MAX_RESOLVE_DEPTH 32
/* Per-verification state: the DFS stack, the log, current phase and mode. */
225 struct btf_verifier_env {
228 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
229 struct bpf_verifier_log log;
232 enum verifier_phase phase;
233 enum resolve_mode resolve_mode;
/* Human-readable name for each BTF_KIND_*, used in verifier log output. */
236 static const char * const btf_kind_str[NR_BTF_KINDS] = {
237 [BTF_KIND_UNKN] = "UNKNOWN",
238 [BTF_KIND_INT] = "INT",
239 [BTF_KIND_PTR] = "PTR",
240 [BTF_KIND_ARRAY] = "ARRAY",
241 [BTF_KIND_STRUCT] = "STRUCT",
242 [BTF_KIND_UNION] = "UNION",
243 [BTF_KIND_ENUM] = "ENUM",
244 [BTF_KIND_FWD] = "FWD",
245 [BTF_KIND_TYPEDEF] = "TYPEDEF",
246 [BTF_KIND_VOLATILE] = "VOLATILE",
247 [BTF_KIND_CONST] = "CONST",
248 [BTF_KIND_RESTRICT] = "RESTRICT",
/* Per-kind operations:
 * check_meta:   validate a btf_type and its kind-specific trailing data
 * resolve:      one DFS step for types that need resolving
 * check_member: validate this type when used as a struct/union member
 * log_details:  kind-specific verifier-log output
 * seq_show:     pretty-print a value of this type to a seq_file
 */
251 struct btf_kind_operations {
252 s32 (*check_meta)(struct btf_verifier_env *env,
253 const struct btf_type *t,
255 int (*resolve)(struct btf_verifier_env *env,
256 const struct resolve_vertex *v);
257 int (*check_member)(struct btf_verifier_env *env,
258 const struct btf_type *struct_type,
259 const struct btf_member *member,
260 const struct btf_type *member_type);
261 void (*log_details)(struct btf_verifier_env *env,
262 const struct btf_type *t);
263 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
264 u32 type_id, void *data, u8 bits_offsets,
/* Dispatch table indexed by BTF kind. */
268 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
/* Singleton object for type_id 0 ("void"); stored in btf->types[0]. */
269 static struct btf_type btf_void;
271 static bool btf_type_is_modifier(const struct btf_type *t)
273 /* Some of them are not strictly a C modifier
274 * but they are grouped into the same bucket
276 * A type (t) that refers to another
277 * type through t->type AND its size cannot
278 * be determined without following the t->type.
280 * ptr does not fall into this bucket
281 * because its size is always sizeof(void *).
283 switch (BTF_INFO_KIND(t->info)) {
284 case BTF_KIND_TYPEDEF:
285 case BTF_KIND_VOLATILE:
287 case BTF_KIND_RESTRICT:
294 static bool btf_type_is_void(const struct btf_type *t)
296 /* void => no type and size info.
297 * Hence, FWD is also treated as void.
299 return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
/* Guards against both a failed (NULL) lookup and the void type. */
302 static bool btf_type_is_void_or_null(const struct btf_type *t)
304 return !t || btf_type_is_void(t);
307 /* union is only a special case of struct:
308 * all its offsetof(member) == 0
310 static bool btf_type_is_struct(const struct btf_type *t)
312 u8 kind = BTF_INFO_KIND(t->info);
314 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
317 static bool btf_type_is_array(const struct btf_type *t)
319 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
322 static bool btf_type_is_ptr(const struct btf_type *t)
324 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
327 static bool btf_type_is_int(const struct btf_type *t)
329 return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
332 /* What types need to be resolved?
334 * btf_type_is_modifier() is an obvious one.
336 * btf_type_is_struct() because its member refers to
337 * another type (through member->type).
339 * btf_type_is_array() because its element (array->type)
340 * refers to another type. Array can be thought of as a
341 * special case of struct while array just has the same
342 * member-type repeated array->nelems times.
344 static bool btf_type_needs_resolve(const struct btf_type *t)
346 return btf_type_is_modifier(t) ||
347 btf_type_is_ptr(t) ||
348 btf_type_is_struct(t) ||
349 btf_type_is_array(t);
352 /* t->size can be used */
353 static bool btf_type_has_size(const struct btf_type *t)
355 switch (BTF_INFO_KIND(t->info)) {
357 case BTF_KIND_STRUCT:
/* Map a BTF_INT_* encoding value to its log string. */
366 static const char *btf_int_encoding_str(u8 encoding)
370 else if (encoding == BTF_INT_SIGNED)
372 else if (encoding == BTF_INT_CHAR)
374 else if (encoding == BTF_INT_BOOL)
376 else if (encoding == BTF_INT_VARARGS)
/* Number of members/elements encoded in t->info. */
382 static u16 btf_type_vlen(const struct btf_type *t)
384 return BTF_INFO_VLEN(t->info);
/* INT kind: a u32 of encoding/offset/bits data immediately follows t. */
387 static u32 btf_type_int(const struct btf_type *t)
389 return *(u32 *)(t + 1);
/* Kind-specific trailing data accessors: the extra data follows btf_type. */
392 static const struct btf_array *btf_type_array(const struct btf_type *t)
394 return (const struct btf_array *)(t + 1);
397 static const struct btf_member *btf_type_member(const struct btf_type *t)
399 return (const struct btf_member *)(t + 1);
402 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
404 return (const struct btf_enum *)(t + 1);
/* Look up the operations for t's kind in the dispatch table. */
407 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
409 return kind_ops[BTF_INFO_KIND(t->info)];
/* A name offset is valid if it has no ELF strtab id set and falls
 * within the string section (str_len).
 */
412 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
414 return !BTF_STR_TBL_ELF_ID(offset) &&
415 BTF_STR_OFFSET(offset) < btf->hdr->str_len;
/* Offset 0 means anonymous; out-of-range offsets yield a placeholder
 * string instead of an out-of-bounds read.
 */
418 static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
420 if (!BTF_STR_OFFSET(offset))
422 else if (BTF_STR_OFFSET(offset) < btf->hdr->str_len)
423 return &btf->strings[BTF_STR_OFFSET(offset)];
425 return "(invalid-name-offset)";
/* type_id 0 maps to btf_void in types[0]; ids above nr_types are invalid. */
428 static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
430 if (type_id > btf->nr_types)
433 return btf->types[type_id];
/* varargs core: forward fmt/args into the bpf verifier log. */
436 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
437 const char *fmt, ...)
442 bpf_verifier_vlog(log, fmt, args);
/* Log a plain message, skipping all work when no log was requested. */
446 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
447 const char *fmt, ...)
449 struct bpf_verifier_log *log = &env->log;
452 if (!bpf_verifier_log_needed(log))
456 bpf_verifier_vlog(log, fmt, args);
/* Log one btf_type as "[type_id] KIND name", optionally followed by
 * the kind's log_details() output and a caller-supplied message.
 */
460 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
461 const struct btf_type *t,
463 const char *fmt, ...)
465 struct bpf_verifier_log *log = &env->log;
466 u8 kind = BTF_INFO_KIND(t->info);
467 struct btf *btf = env->btf;
470 if (!bpf_verifier_log_needed(log))
473 __btf_verifier_log(log, "[%u] %s %s%s",
476 btf_name_by_offset(btf, t->name),
477 log_details ? " " : "");
480 btf_type_ops(t)->log_details(env, t);
483 __btf_verifier_log(log, " ");
485 bpf_verifier_vlog(log, fmt, args);
489 __btf_verifier_log(log, "\n");
/* Convenience wrappers: with and without kind-specific details. */
492 #define btf_verifier_log_type(env, t, ...) \
493 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
494 #define btf_verifier_log_basic(env, t, ...) \
495 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
/* Log one struct/union member (name, type_id, bits_offset). */
498 static void btf_verifier_log_member(struct btf_verifier_env *env,
499 const struct btf_type *struct_type,
500 const struct btf_member *member,
501 const char *fmt, ...)
503 struct bpf_verifier_log *log = &env->log;
504 struct btf *btf = env->btf;
507 if (!bpf_verifier_log_needed(log))
510 /* The CHECK_META phase already did a btf dump.
512 * If member is logged again, it must hit an error in
513 * parsing this member. It is useful to print out which
514 * struct this member belongs to.
516 if (env->phase != CHECK_META)
517 btf_verifier_log_type(env, struct_type, NULL);
519 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
520 btf_name_by_offset(btf, member->name),
521 member->type, member->offset);
524 __btf_verifier_log(log, " ");
526 bpf_verifier_vlog(log, fmt, args);
530 __btf_verifier_log(log, "\n");
/* Dump every btf_header field plus the total BTF data size. */
533 static void btf_verifier_log_hdr(struct btf_verifier_env *env)
535 struct bpf_verifier_log *log = &env->log;
536 const struct btf *btf = env->btf;
537 const struct btf_header *hdr;
539 if (!bpf_verifier_log_needed(log))
543 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
544 __btf_verifier_log(log, "version: %u\n", hdr->version);
545 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
546 __btf_verifier_log(log, "parent_label: %u\n", hdr->parent_label);
547 __btf_verifier_log(log, "parent_name: %u\n", hdr->parent_name);
548 __btf_verifier_log(log, "label_off: %u\n", hdr->label_off);
549 __btf_verifier_log(log, "object_off: %u\n", hdr->object_off);
550 __btf_verifier_log(log, "func_off: %u\n", hdr->func_off);
551 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
552 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
553 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
554 __btf_verifier_log(log, "btf_total_size: %u\n", btf->data_size);
/* Append type 't' to btf->types[], growing the array geometrically
 * (by at least a quarter, min 16 slots) up to BTF_MAX_NR_TYPES.
 * Slot 0 is always btf_void and is not counted in nr_types.
 */
557 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
559 struct btf *btf = env->btf;
561 /* < 2 because +1 for btf_void which is always in btf->types[0].
562 * btf_void is not accounted in btf->nr_types because btf_void
563 * does not come from the BTF file.
565 if (btf->types_size - btf->nr_types < 2) {
566 /* Expand 'types' array */
568 struct btf_type **new_types;
569 u32 expand_by, new_size;
571 if (btf->types_size == BTF_MAX_NR_TYPES) {
572 btf_verifier_log(env, "Exceeded max num of types");
576 expand_by = max_t(u32, btf->types_size >> 2, 16);
577 new_size = min_t(u32, BTF_MAX_NR_TYPES,
578 btf->types_size + expand_by);
580 new_types = kvzalloc(new_size * sizeof(*new_types),
581 GFP_KERNEL | __GFP_NOWARN);
/* First expansion: install btf_void; otherwise copy the old slots
 * (nr_types + 1 to include the void slot).
 */
585 if (btf->nr_types == 0)
586 new_types[0] = &btf_void;
588 memcpy(new_types, btf->types,
589 sizeof(*btf->types) * (btf->nr_types + 1))
592 btf->types = new_types;
593 btf->types_size = new_size;
596 btf->types[++(btf->nr_types)] = t;
/* Release a btf object and its resolved_* side arrays. */
601 static void btf_free(struct btf *btf)
604 kvfree(btf->resolved_sizes);
605 kvfree(btf->resolved_ids);
/* Refcounting: get/put pair; put frees on last reference. */
610 static void btf_get(struct btf *btf)
612 refcount_inc(&btf->refcnt);
615 void btf_put(struct btf *btf)
617 if (btf && refcount_dec_and_test(&btf->refcnt))
/* Allocate the per-type resolve arrays (nr_types + 1 slots so that
 * type_id can index them directly, including slot 0 for btf_void).
 * On success, resolved_sizes/resolved_ids are owned by btf and
 * visit_states by env; on failure all three are freed.
 */
621 static int env_resolve_init(struct btf_verifier_env *env)
623 struct btf *btf = env->btf;
624 u32 nr_types = btf->nr_types;
625 u32 *resolved_sizes = NULL;
626 u32 *resolved_ids = NULL;
627 u8 *visit_states = NULL;
629 /* +1 for btf_void */
630 resolved_sizes = kvzalloc((nr_types + 1) * sizeof(*resolved_sizes),
631 GFP_KERNEL | __GFP_NOWARN);
635 resolved_ids = kvzalloc((nr_types + 1) * sizeof(*resolved_ids),
636 GFP_KERNEL | __GFP_NOWARN);
640 visit_states = kvzalloc((nr_types + 1) * sizeof(*visit_states),
641 GFP_KERNEL | __GFP_NOWARN);
645 btf->resolved_sizes = resolved_sizes;
646 btf->resolved_ids = resolved_ids;
647 env->visit_states = visit_states;
/* Error path: nothing was published, free everything. */
652 kvfree(resolved_sizes);
653 kvfree(resolved_ids);
654 kvfree(visit_states);
/* Free env-owned state (the resolved_* arrays belong to btf). */
658 static void btf_verifier_env_free(struct btf_verifier_env *env)
660 kvfree(env->visit_states);
/* Decide whether next_type terminates the current resolve DFS;
 * what counts as a sink depends on the active resolve_mode.
 */
664 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
665 const struct btf_type *next_type)
667 switch (env->resolve_mode) {
669 /* int, enum or void is a sink */
670 return !btf_type_needs_resolve(next_type);
672 /* int, enum, void, struct or array is a sink for ptr */
673 return !btf_type_is_modifier(next_type) &&
674 !btf_type_is_ptr(next_type);
675 case RESOLVE_STRUCT_OR_ARRAY:
676 /* int, enum, void or ptr is a sink for struct and array */
677 return !btf_type_is_modifier(next_type) &&
678 !btf_type_is_array(next_type) &&
679 !btf_type_is_struct(next_type);
685 static bool env_type_is_resolved(const struct btf_verifier_env *env,
688 return env->visit_states[type_id] == RESOLVED;
/* Push (t, type_id) onto the DFS stack. Fails when the depth limit is
 * hit or when type_id was already visited (loop / backedge detection).
 * The first push determines resolve_mode from the kind of t.
 */
691 static int env_stack_push(struct btf_verifier_env *env,
692 const struct btf_type *t, u32 type_id)
694 struct resolve_vertex *v;
696 if (env->top_stack == MAX_RESOLVE_DEPTH)
699 if (env->visit_states[type_id] != NOT_VISITED)
702 env->visit_states[type_id] = VISITED;
704 v = &env->stack[env->top_stack++];
706 v->type_id = type_id;
709 if (env->resolve_mode == RESOLVE_TBD) {
710 if (btf_type_is_ptr(t))
711 env->resolve_mode = RESOLVE_PTR;
712 else if (btf_type_is_struct(t) || btf_type_is_array(t))
713 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
/* Remember which member to continue from when a struct resumes. */
719 static void env_stack_set_next_member(struct btf_verifier_env *env,
722 env->stack[env->top_stack - 1].next_member = next_member;
/* Pop the top vertex and record its final resolved id/size. */
725 static void env_stack_pop_resolved(struct btf_verifier_env *env,
726 u32 resolved_type_id,
729 u32 type_id = env->stack[--(env->top_stack)].type_id;
730 struct btf *btf = env->btf;
732 btf->resolved_sizes[type_id] = resolved_size;
733 btf->resolved_ids[type_id] = resolved_type_id;
734 env->visit_states[type_id] = RESOLVED;
/* Peek at the top of the DFS stack, or NULL when empty. */
737 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
739 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
742 /* The input param "type_id" must point to a needs_resolve type */
743 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
746 *type_id = btf->resolved_ids[*type_id];
747 return btf_type_by_id(btf, *type_id);
/* Follow *type_id to a type with a usable size. On success, *type_id is
 * updated to the size-bearing type and *ret_size (when requested) to its
 * size. Arrays and modifiers use the precomputed resolved_sizes/ids.
 */
750 const struct btf_type *btf_type_id_size(const struct btf *btf,
751 u32 *type_id, u32 *ret_size)
753 const struct btf_type *size_type;
754 u32 size_type_id = *type_id;
757 size_type = btf_type_by_id(btf, size_type_id);
758 if (btf_type_is_void_or_null(size_type))
761 if (btf_type_has_size(size_type)) {
762 size = size_type->size;
763 } else if (btf_type_is_array(size_type)) {
764 size = btf->resolved_sizes[size_type_id];
765 } else if (btf_type_is_ptr(size_type)) {
766 size = sizeof(void *);
/* Anything else must be a modifier; follow its resolved target. */
768 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
771 size = btf->resolved_sizes[size_type_id];
772 size_type_id = btf->resolved_ids[size_type_id];
773 size_type = btf_type_by_id(btf, size_type_id);
774 if (btf_type_is_void(size_type))
778 *type_id = size_type_id;
/* Default ops for kinds that do not support the operation: they only
 * log and (presumably, in the elided lines) return an error.
 */
785 static int btf_df_check_member(struct btf_verifier_env *env,
786 const struct btf_type *struct_type,
787 const struct btf_member *member,
788 const struct btf_type *member_type)
790 btf_verifier_log_basic(env, struct_type,
791 "Unsupported check_member");
795 static int btf_df_resolve(struct btf_verifier_env *env,
796 const struct resolve_vertex *v)
798 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
/* Default seq_show: print a marker instead of a value. */
802 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
803 u32 type_id, void *data, u8 bits_offsets,
806 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
/* Validate an INT member of a struct: the (possibly bitfield) member
 * must fit inside the struct and within a single u64 load.
 */
809 static int btf_int_check_member(struct btf_verifier_env *env,
810 const struct btf_type *struct_type,
811 const struct btf_member *member,
812 const struct btf_type *member_type)
814 u32 int_data = btf_type_int(member_type);
815 u32 struct_bits_off = member->offset;
816 u32 struct_size = struct_type->size;
/* Overflow check before adding the INT's own bit offset. */
820 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
821 btf_verifier_log_member(env, struct_type, member,
822 "bits_offset exceeds U32_MAX");
826 struct_bits_off += BTF_INT_OFFSET(int_data);
827 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
828 nr_copy_bits = BTF_INT_BITS(int_data) +
829 BITS_PER_BYTE_MASKED(struct_bits_off);
831 if (nr_copy_bits > BITS_PER_U64) {
832 btf_verifier_log_member(env, struct_type, member,
833 "nr_copy_bits exceeds 64");
837 if (struct_size < bytes_offset ||
838 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
839 btf_verifier_log_member(env, struct_type, member,
840 "Member exceeds struct_size");
/* Validate an INT type's meta data: enough bytes for the trailing u32,
 * vlen must be 0, bits must fit in u64 and in t->size, and the
 * encoding must be one of the known BTF_INT_* values.
 */
847 static s32 btf_int_check_meta(struct btf_verifier_env *env,
848 const struct btf_type *t,
851 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
854 if (meta_left < meta_needed) {
855 btf_verifier_log_basic(env, t,
856 "meta_left:%u meta_needed:%u",
857 meta_left, meta_needed);
861 if (btf_type_vlen(t)) {
862 btf_verifier_log_type(env, t, "vlen != 0");
866 int_data = btf_type_int(t);
867 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
869 if (nr_bits > BITS_PER_U64) {
870 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
875 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
876 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
880 encoding = BTF_INT_ENCODING(int_data);
882 encoding != BTF_INT_SIGNED &&
883 encoding != BTF_INT_CHAR &&
884 encoding != BTF_INT_BOOL &&
885 encoding != BTF_INT_VARARGS) {
886 btf_verifier_log_type(env, t, "Unsupported encoding");
890 btf_verifier_log_type(env, t, NULL);
/* log_details for INT: size, bit offset, bit count and encoding. */
895 static void btf_int_log(struct btf_verifier_env *env,
896 const struct btf_type *t)
898 int int_data = btf_type_int(t);
900 btf_verifier_log(env,
901 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
902 t->size, BTF_INT_OFFSET(int_data),
903 BTF_INT_BITS(int_data),
904 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
/* Print a bitfield INT: copy the covering bytes into a u64, mask off
 * the unused high bits of the last byte, shift out the low bits and
 * print the result in hex.
 */
907 static void btf_int_bits_seq_show(const struct btf *btf,
908 const struct btf_type *t,
909 void *data, u8 bits_offset,
912 u32 int_data = btf_type_int(t);
913 u16 nr_bits = BTF_INT_BITS(int_data);
914 u16 total_bits_offset;
/* Normalize to a byte-aligned base plus a sub-byte bit offset. */
923 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
924 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
925 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
926 nr_copy_bits = nr_bits + bits_offset;
927 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
929 print_num.u64_num = 0;
930 memcpy(&print_num.u64_num, data, nr_copy_bytes);
932 /* Ditch the higher order bits */
933 nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
935 /* We need to mask out some bits of the upper byte. */
936 u8 mask = (1 << nr_upper_bits) - 1;
938 print_num.u8_nums[nr_copy_bytes - 1] &= mask;
941 print_num.u64_num >>= bits_offset;
943 seq_printf(m, "0x%llx", print_num.u64_num);
/* Print an INT value. Byte-aligned, byte-sized ints are printed
 * directly per width/signedness; everything else goes through the
 * bitfield path above.
 */
946 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
947 u32 type_id, void *data, u8 bits_offset,
950 u32 int_data = btf_type_int(t);
951 u8 encoding = BTF_INT_ENCODING(int_data);
952 bool sign = encoding & BTF_INT_SIGNED;
953 u32 nr_bits = BTF_INT_BITS(int_data);
955 if (bits_offset || BTF_INT_OFFSET(int_data) ||
956 BITS_PER_BYTE_MASKED(nr_bits)) {
957 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
964 seq_printf(m, "%lld", *(s64 *)data);
966 seq_printf(m, "%llu", *(u64 *)data);
970 seq_printf(m, "%d", *(s32 *)data);
972 seq_printf(m, "%u", *(u32 *)data);
976 seq_printf(m, "%d", *(s16 *)data);
978 seq_printf(m, "%u", *(u16 *)data);
982 seq_printf(m, "%d", *(s8 *)data);
984 seq_printf(m, "%u", *(u8 *)data);
987 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
/* Operations for BTF_KIND_INT. */
991 static const struct btf_kind_operations int_ops = {
992 .check_meta = btf_int_check_meta,
993 .resolve = btf_df_resolve,
994 .check_member = btf_int_check_member,
995 .log_details = btf_int_log,
996 .seq_show = btf_int_seq_show,
/* A modifier member is checked by resolving to the underlying type and
 * delegating to that type's check_member with the resolved type_id.
 */
999 static int btf_modifier_check_member(struct btf_verifier_env *env,
1000 const struct btf_type *struct_type,
1001 const struct btf_member *member,
1002 const struct btf_type *member_type)
1004 const struct btf_type *resolved_type;
1005 u32 resolved_type_id = member->type;
1006 struct btf_member resolved_member;
1007 struct btf *btf = env->btf;
1009 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1010 if (!resolved_type) {
1011 btf_verifier_log_member(env, struct_type, member,
/* Delegate with a copy of the member carrying the resolved type_id. */
1016 resolved_member = *member;
1017 resolved_member.type = resolved_type_id;
1019 return btf_type_ops(resolved_type)->check_member(env, struct_type,
/* A pointer member must be byte aligned and sizeof(void *) must fit
 * within the remaining struct size.
 */
1024 static int btf_ptr_check_member(struct btf_verifier_env *env,
1025 const struct btf_type *struct_type,
1026 const struct btf_member *member,
1027 const struct btf_type *member_type)
1029 u32 struct_size, struct_bits_off, bytes_offset;
1031 struct_size = struct_type->size;
1032 struct_bits_off = member->offset;
1033 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1035 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1036 btf_verifier_log_member(env, struct_type, member,
1037 "Member is not byte aligned");
1041 if (struct_size - bytes_offset < sizeof(void *)) {
1042 btf_verifier_log_member(env, struct_type, member,
1043 "Member exceeds struct_size");
/* Shared check_meta for reference kinds (modifier/ptr/fwd): no vlen,
 * and t->type must not carry a parent-BTF bit.
 */
1050 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1051 const struct btf_type *t,
1054 if (btf_type_vlen(t)) {
1055 btf_verifier_log_type(env, t, "vlen != 0");
1059 if (BTF_TYPE_PARENT(t->type)) {
1060 btf_verifier_log_type(env, t, "Invalid type_id");
1064 btf_verifier_log_type(env, t, NULL);
/* Resolve a modifier (typedef/const/volatile/restrict): follow t->type,
 * pushing it onto the DFS stack unless it is a sink or already resolved,
 * then record the resolved id and size for later reuse.
 */
1069 static int btf_modifier_resolve(struct btf_verifier_env *env,
1070 const struct resolve_vertex *v)
1072 const struct btf_type *t = v->t;
1073 const struct btf_type *next_type;
1074 u32 next_type_id = t->type;
1075 struct btf *btf = env->btf;
1076 u32 next_type_size = 0;
1078 next_type = btf_type_by_id(btf, next_type_id);
1080 btf_verifier_log_type(env, v->t, "Invalid type_id");
1084 /* "typedef void new_void", "const void"...etc */
1085 if (btf_type_is_void(next_type))
1088 if (!env_type_is_resolve_sink(env, next_type) &&
1089 !env_type_is_resolved(env, next_type_id))
1090 return env_stack_push(env, next_type, next_type_id);
1092 /* Figure out the resolved next_type_id with size.
1093 * They will be stored in the current modifier's
1094 * resolved_ids and resolved_sizes such that it can
1095 * save us a few type-following when we use it later (e.g. in
1098 if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1099 !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1100 btf_verifier_log_type(env, v->t, "Invalid type_id");
1105 env_stack_pop_resolved(env, next_type_id, next_type_size);
/* Resolve a pointer: same structure as the modifier case, but a ptr
 * resolves with size 0 recorded (its size is always sizeof(void *)).
 */
1110 static int btf_ptr_resolve(struct btf_verifier_env *env,
1111 const struct resolve_vertex *v)
1113 const struct btf_type *next_type;
1114 const struct btf_type *t = v->t;
1115 u32 next_type_id = t->type;
1116 struct btf *btf = env->btf;
1117 u32 next_type_size = 0;
1119 next_type = btf_type_by_id(btf, next_type_id);
1121 btf_verifier_log_type(env, v->t, "Invalid type_id");
1126 if (btf_type_is_void(next_type))
1129 if (!env_type_is_resolve_sink(env, next_type) &&
1130 !env_type_is_resolved(env, next_type_id))
1131 return env_stack_push(env, next_type, next_type_id);
1133 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1134 * the modifier may have stopped resolving when it was resolved
1135 * to a ptr (last-resolved-ptr).
1137 * We now need to continue from the last-resolved-ptr to
1138 * ensure the last-resolved-ptr will not refer back to
1139 * the current ptr (t).
1141 if (btf_type_is_modifier(next_type)) {
1142 const struct btf_type *resolved_type;
1143 u32 resolved_type_id;
1145 resolved_type_id = next_type_id;
1146 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1148 if (btf_type_is_ptr(resolved_type) &&
1149 !env_type_is_resolve_sink(env, resolved_type) &&
1150 !env_type_is_resolved(env, resolved_type_id))
1151 return env_stack_push(env, resolved_type,
1155 if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1156 !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1157 btf_verifier_log_type(env, v->t, "Invalid type_id");
1162 env_stack_pop_resolved(env, next_type_id, 0);
/* A modifier shows as whatever its resolved target shows as. */
1167 static void btf_modifier_seq_show(const struct btf *btf,
1168 const struct btf_type *t,
1169 u32 type_id, void *data,
1170 u8 bits_offset, struct seq_file *m)
1172 t = btf_type_id_resolve(btf, &type_id);
1174 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
/* Pointers print as "%p" (the value is hashed by the kernel printk). */
1177 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1178 u32 type_id, void *data, u8 bits_offset,
1181 /* It is a hashed value */
1182 seq_printf(m, "%p", *(void **)data);
/* log_details shared by the reference kinds: just the target type_id. */
1185 static void btf_ref_type_log(struct btf_verifier_env *env,
1186 const struct btf_type *t)
1188 btf_verifier_log(env, "type_id=%u", t->type);
/* Operations for modifier kinds (typedef/const/volatile/restrict). */
1191 static struct btf_kind_operations modifier_ops = {
1192 .check_meta = btf_ref_type_check_meta,
1193 .resolve = btf_modifier_resolve,
1194 .check_member = btf_modifier_check_member,
1195 .log_details = btf_ref_type_log,
1196 .seq_show = btf_modifier_seq_show,
/* Operations for BTF_KIND_PTR. */
1199 static struct btf_kind_operations ptr_ops = {
1200 .check_meta = btf_ref_type_check_meta,
1201 .resolve = btf_ptr_resolve,
1202 .check_member = btf_ptr_check_member,
1203 .log_details = btf_ref_type_log,
1204 .seq_show = btf_ptr_seq_show,
/* Operations for BTF_KIND_FWD: treated like void, so mostly defaults. */
1207 static struct btf_kind_operations fwd_ops = {
1208 .check_meta = btf_ref_type_check_meta,
1209 .resolve = btf_df_resolve,
1210 .check_member = btf_df_check_member,
1211 .log_details = btf_ref_type_log,
1212 .seq_show = btf_df_seq_show,
1215 static int btf_array_check_member(struct btf_verifier_env *env,
1216 const struct btf_type *struct_type,
1217 const struct btf_member *member,
1218 const struct btf_type *member_type)
1220 u32 struct_bits_off = member->offset;
1221 u32 struct_size, bytes_offset;
1222 u32 array_type_id, array_size;
1223 struct btf *btf = env->btf;
1225 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1226 btf_verifier_log_member(env, struct_type, member,
1227 "Member is not byte aligned");
1231 array_type_id = member->type;
1232 btf_type_id_size(btf, &array_type_id, &array_size);
1233 struct_size = struct_type->size;
1234 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1235 if (struct_size - bytes_offset < array_size) {
1236 btf_verifier_log_member(env, struct_type, member,
1237 "Member exceeds struct_size");
1244 static s32 btf_array_check_meta(struct btf_verifier_env *env,
1245 const struct btf_type *t,
1248 const struct btf_array *array = btf_type_array(t);
1249 u32 meta_needed = sizeof(*array);
1251 if (meta_left < meta_needed) {
1252 btf_verifier_log_basic(env, t,
1253 "meta_left:%u meta_needed:%u",
1254 meta_left, meta_needed);
1258 if (btf_type_vlen(t)) {
1259 btf_verifier_log_type(env, t, "vlen != 0");
1263 /* We are a little forgiving on array->index_type since
1264 * the kernel is not using it.
1266 /* Array elem cannot be in type void,
1267 * so !array->type is not allowed.
1269 if (!array->type || BTF_TYPE_PARENT(array->type)) {
1270 btf_verifier_log_type(env, t, "Invalid type_id");
1274 btf_verifier_log_type(env, t, NULL);
/* Resolve a BTF_KIND_ARRAY type: push the element type onto the resolve
 * stack if it is not yet resolved, then validate that the element has a
 * concrete size, that an int element is a whole-byte, non-bitfield
 * u8/u16/u32/u64, and that nelems * elem_size fits in a u32.
 * NOTE(review): several error-return lines are elided in this excerpt;
 * each logged failure presumably returns a negative errno — confirm
 * against the full source.
 */
1279 static int btf_array_resolve(struct btf_verifier_env *env,
1280 const struct resolve_vertex *v)
1282 const struct btf_array *array = btf_type_array(v->t);
1283 const struct btf_type *elem_type;
1284 u32 elem_type_id = array->type;
1285 struct btf *btf = env->btf;
/* Array element must not be void/NULL. */
1288 elem_type = btf_type_by_id(btf, elem_type_id);
1289 if (btf_type_is_void_or_null(elem_type)) {
1290 btf_verifier_log_type(env, v->t,
/* Defer: resolve the element type first, then revisit this array. */
1295 if (!env_type_is_resolve_sink(env, elem_type) &&
1296 !env_type_is_resolved(env, elem_type_id))
1297 return env_stack_push(env, elem_type, elem_type_id);
/* Re-read the element through its resolved id to obtain elem_size. */
1299 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1301 btf_verifier_log_type(env, v->t, "Invalid elem");
1305 if (btf_type_is_int(elem_type)) {
1306 int int_type_data = btf_type_int(elem_type);
1307 u16 nr_bits = BTF_INT_BITS(int_type_data);
1308 u16 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
1310 /* Put more restriction on array of int. The int cannot
1311 * be a bit field and it must be either u8/u16/u32/u64.
1313 if (BITS_PER_BYTE_MASKED(nr_bits) ||
1314 BTF_INT_OFFSET(int_type_data) ||
1315 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
1316 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
1317 btf_verifier_log_type(env, v->t,
1318 "Invalid array of int");
/* Guard nelems * elem_size against u32 overflow before multiplying. */
1323 if (array->nelems && elem_size > U32_MAX / array->nelems) {
1324 btf_verifier_log_type(env, v->t,
1325 "Array size overflows U32_MAX");
/* Record the array's resolved total size (elem_size * nelems). */
1329 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
/* Log the array-specific details (elem type id, index type id, count)
 * appended to the generic type line already emitted by the caller.
 */
1334 static void btf_array_log(struct btf_verifier_env *env,
1335 const struct btf_type *t)
1337 const struct btf_array *array = btf_type_array(t);
1339 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1340 array->type, array->index_type, array->nelems);
/* Print an array value to the seq_file by delegating each element to the
 * element type's seq_show operation.  btf_type_id_size() also yields the
 * per-element size used to advance 'data' (advance elided in excerpt).
 */
1343 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1344 u32 type_id, void *data, u8 bits_offset,
1347 const struct btf_array *array = btf_type_array(t);
1348 const struct btf_kind_operations *elem_ops;
1349 const struct btf_type *elem_type;
1350 u32 i, elem_size, elem_type_id;
1352 elem_type_id = array->type;
1353 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1354 elem_ops = btf_type_ops(elem_type);
1356 for (i = 0; i < array->nelems; i++) {
/* Recurse into the element kind's printer for each element. */
1360 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
/* Kind operations for BTF_KIND_ARRAY; dispatched via kind_ops[]. */
1367 static struct btf_kind_operations array_ops = {
1368 .check_meta = btf_array_check_meta,
1369 .resolve = btf_array_resolve,
1370 .check_member = btf_array_check_member,
1371 .log_details = btf_array_log,
1372 .seq_show = btf_array_seq_show,
/* Validate a struct/union used as a member of another struct: it must be
 * byte aligned and must fit within the remaining bytes of the enclosing
 * struct.  Success/error return lines are elided in this excerpt.
 */
1375 static int btf_struct_check_member(struct btf_verifier_env *env,
1376 const struct btf_type *struct_type,
1377 const struct btf_member *member,
1378 const struct btf_type *member_type)
1380 u32 struct_bits_off = member->offset;
1381 u32 struct_size, bytes_offset;
/* member->offset is in bits; a struct member cannot be a bitfield. */
1383 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1384 btf_verifier_log_member(env, struct_type, member,
1385 "Member is not byte aligned");
1389 struct_size = struct_type->size;
1390 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
/* bytes_offset <= struct_size is guaranteed by check_meta's
 * BITS_ROUNDUP_BYTES check, so this subtraction cannot underflow.
 */
1391 if (struct_size - bytes_offset < member_type->size) {
1392 btf_verifier_log_member(env, struct_type, member,
1393 "Member exceeds struct_size");
/* Validate the metadata of a BTF_KIND_STRUCT/UNION: the member array must
 * fit in meta_left, every member needs a valid name offset and a non-void,
 * non-parent type id, union members must be at bit offset 0, and no member
 * may start beyond the declared struct size.  Returns meta_needed on
 * success, negative on failure (return lines elided in this excerpt).
 */
1400 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1401 const struct btf_type *t,
/* Unions share this handler with structs; only offset rules differ. */
1404 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1405 const struct btf_member *member;
1406 struct btf *btf = env->btf;
1407 u32 struct_size = t->size;
1411 meta_needed = btf_type_vlen(t) * sizeof(*member);
1412 if (meta_left < meta_needed) {
1413 btf_verifier_log_basic(env, t,
1414 "meta_left:%u meta_needed:%u",
1415 meta_left, meta_needed);
1419 btf_verifier_log_type(env, t, NULL);
1421 for_each_member(i, t, member) {
1422 if (!btf_name_offset_valid(btf, member->name)) {
1423 btf_verifier_log_member(env, t, member,
1424 "Invalid member name_offset:%u",
1429 /* A member cannot be in type void */
1430 if (!member->type || BTF_TYPE_PARENT(member->type)) {
1431 btf_verifier_log_member(env, t, member,
/* All union members overlap, so any non-zero bit offset is invalid. */
1436 if (is_union && member->offset) {
1437 btf_verifier_log_member(env, t, member,
1438 "Invalid member bits_offset");
1442 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1443 btf_verifier_log_member(env, t, member,
/* NOTE(review): "Memmber" typo in this log string; fixing it would
 * change a runtime string, so it is only flagged here.
 */
1444 "Memmber bits_offset exceeds its struct size");
1448 btf_verifier_log_member(env, t, member, NULL);
/* Resolve a struct/union: walk the members from v->next_member, pushing
 * any unresolved member type onto the resolve stack (recording where to
 * resume), then run each member through its kind's check_member.  A
 * struct's own size is already known, so it pops resolved with (0, 0).
 */
1454 static int btf_struct_resolve(struct btf_verifier_env *env,
1455 const struct resolve_vertex *v)
1457 const struct btf_member *member;
1461 /* Before continue resolving the next_member,
1462 * ensure the last member is indeed resolved to a
1463 * type with size info.
/* next_member != 0 means we are resuming after a member push. */
1465 if (v->next_member) {
1466 const struct btf_type *last_member_type;
1467 const struct btf_member *last_member;
1468 u16 last_member_type_id;
1470 last_member = btf_type_member(v->t) + v->next_member - 1;
1471 last_member_type_id = last_member->type;
/* Resuming implies the pushed member resolved; anything else is a
 * verifier-internal bug, hence WARN_ON_ONCE.
 */
1472 if (WARN_ON_ONCE(!env_type_is_resolved(env,
1473 last_member_type_id)))
1476 last_member_type = btf_type_by_id(env->btf,
1477 last_member_type_id);
1478 err = btf_type_ops(last_member_type)->check_member(env, v->t,
1485 for_each_member_from(i, v->next_member, v->t, member) {
1486 u32 member_type_id = member->type;
1487 const struct btf_type *member_type = btf_type_by_id(env->btf,
1490 if (btf_type_is_void_or_null(member_type)) {
1491 btf_verifier_log_member(env, v->t, member,
/* Unresolved member: remember the resume point, then recurse via
 * the explicit stack rather than C recursion.
 */
1496 if (!env_type_is_resolve_sink(env, member_type) &&
1497 !env_type_is_resolved(env, member_type_id)) {
1498 env_stack_set_next_member(env, i + 1);
1499 return env_stack_push(env, member_type, member_type_id);
1502 err = btf_type_ops(member_type)->check_member(env, v->t,
1509 env_stack_pop_resolved(env, 0, 0);
/* Log struct/union details: declared size and member count. */
1514 static void btf_struct_log(struct btf_verifier_env *env,
1515 const struct btf_type *t)
1517 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
/* Print a struct/union value to the seq_file, delegating each member to
 * its kind's seq_show at the member's byte offset plus residual bit
 * offset.  Separator is "|" for unions, "," for structs.
 */
1520 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
1521 u32 type_id, void *data, u8 bits_offset,
1524 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
1525 const struct btf_member *member;
1529 for_each_member(i, t, member) {
1530 const struct btf_type *member_type = btf_type_by_id(btf,
/* Split the bit offset into whole bytes + leftover bits (bitfields). */
1532 u32 member_offset = member->offset;
1533 u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
1534 u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
1535 const struct btf_kind_operations *ops;
1540 ops = btf_type_ops(member_type);
1541 ops->seq_show(btf, member_type, member->type,
1542 data + bytes_offset, bits8_offset, m);
/* Kind operations shared by BTF_KIND_STRUCT and BTF_KIND_UNION. */
1547 static struct btf_kind_operations struct_ops = {
1548 .check_meta = btf_struct_check_meta,
1549 .resolve = btf_struct_resolve,
1550 .check_member = btf_struct_check_member,
1551 .log_details = btf_struct_log,
1552 .seq_show = btf_struct_seq_show,
/* Validate an enum used as a struct member: byte aligned and with at
 * least sizeof(int) bytes remaining in the enclosing struct (an enum
 * always occupies sizeof(int) here — see btf_enum_check_meta).
 */
1555 static int btf_enum_check_member(struct btf_verifier_env *env,
1556 const struct btf_type *struct_type,
1557 const struct btf_member *member,
1558 const struct btf_type *member_type)
1560 u32 struct_bits_off = member->offset;
1561 u32 struct_size, bytes_offset;
1563 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1564 btf_verifier_log_member(env, struct_type, member,
1565 "Member is not byte aligned");
1569 struct_size = struct_type->size;
1570 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1571 if (struct_size - bytes_offset < sizeof(int)) {
1572 btf_verifier_log_member(env, struct_type, member,
1573 "Member exceeds struct_size");
/* Validate a BTF_KIND_ENUM: the value array must fit in meta_left, the
 * type's size must be exactly sizeof(int), and every enumerator needs a
 * valid name offset.  Returns meta_needed on success (return lines are
 * elided in this excerpt).
 */
1580 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1581 const struct btf_type *t,
1584 const struct btf_enum *enums = btf_type_enum(t);
1585 struct btf *btf = env->btf;
1589 nr_enums = btf_type_vlen(t);
1590 meta_needed = nr_enums * sizeof(*enums);
1592 if (meta_left < meta_needed) {
1593 btf_verifier_log_basic(env, t,
1594 "meta_left:%u meta_needed:%u",
1595 meta_left, meta_needed);
/* BTF enums are fixed at the size of a C int. */
1599 if (t->size != sizeof(int)) {
1600 btf_verifier_log_type(env, t, "Expected size:%zu",
1605 btf_verifier_log_type(env, t, NULL);
1607 for (i = 0; i < nr_enums; i++) {
1608 if (!btf_name_offset_valid(btf, enums[i].name)) {
1609 btf_verifier_log(env, "\tInvalid name_offset:%u",
1614 btf_verifier_log(env, "\t%s val=%d\n",
1615 btf_name_by_offset(btf, enums[i].name),
/* Log enum details: size (always sizeof(int)) and enumerator count. */
1622 static void btf_enum_log(struct btf_verifier_env *env,
1623 const struct btf_type *t)
1625 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
/* Print an enum value: emit the matching enumerator's name if the int
 * value is found in the value table, otherwise fall back to the raw
 * integer.  Linear scan over vlen enumerators.
 */
1628 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
1629 u32 type_id, void *data, u8 bits_offset,
1632 const struct btf_enum *enums = btf_type_enum(t);
1633 u32 i, nr_enums = btf_type_vlen(t);
1634 int v = *(int *)data;
1636 for (i = 0; i < nr_enums; i++) {
1637 if (v == enums[i].val) {
1639 btf_name_by_offset(btf, enums[i].name));
/* No enumerator matched: print the numeric value instead. */
1644 seq_printf(m, "%d", v);
/* Kind operations for BTF_KIND_ENUM; resolve is the don't-care stub. */
1647 static struct btf_kind_operations enum_ops = {
1648 .check_meta = btf_enum_check_meta,
1649 .resolve = btf_df_resolve,
1650 .check_member = btf_enum_check_member,
1651 .log_details = btf_enum_log,
1652 .seq_show = btf_enum_seq_show,
/* Dispatch table mapping each BTF kind to its operations.  TYPEDEF and
 * the cv-qualifier kinds all share modifier_ops; STRUCT and UNION share
 * struct_ops.  Indexed by BTF_INFO_KIND(t->info), validated against
 * BTF_KIND_MAX/BTF_KIND_UNKN in btf_check_meta() before use.
 */
1655 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
1656 [BTF_KIND_INT] = &int_ops,
1657 [BTF_KIND_PTR] = &ptr_ops,
1658 [BTF_KIND_ARRAY] = &array_ops,
1659 [BTF_KIND_STRUCT] = &struct_ops,
1660 [BTF_KIND_UNION] = &struct_ops,
1661 [BTF_KIND_ENUM] = &enum_ops,
1662 [BTF_KIND_FWD] = &fwd_ops,
1663 [BTF_KIND_TYPEDEF] = &modifier_ops,
1664 [BTF_KIND_VOLATILE] = &modifier_ops,
1665 [BTF_KIND_CONST] = &modifier_ops,
1666 [BTF_KIND_RESTRICT] = &modifier_ops,
/* Validate one btf_type object's metadata: the common header must fit in
 * meta_left, the kind must be in (BTF_KIND_UNKN, BTF_KIND_MAX], and the
 * name offset must be valid; then the kind-specific check_meta consumes
 * its extra data.  Returns the total bytes consumed (header + kind
 * extras) or a negative error from the kind handler.
 */
1669 static s32 btf_check_meta(struct btf_verifier_env *env,
1670 const struct btf_type *t,
1673 u32 saved_meta_left = meta_left;
1676 if (meta_left < sizeof(*t)) {
1677 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
1678 env->log_type_id, meta_left, sizeof(*t));
1681 meta_left -= sizeof(*t);
1683 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
1684 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
1685 btf_verifier_log(env, "[%u] Invalid kind:%u",
1686 env->log_type_id, BTF_INFO_KIND(t->info));
1690 if (!btf_name_offset_valid(env->btf, t->name)) {
1691 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
1692 env->log_type_id, t->name);
/* Dispatch to the kind handler for the data that follows the header. */
1696 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
1697 if (var_meta_size < 0)
1698 return var_meta_size;
1700 meta_left -= var_meta_size;
1702 return saved_meta_left - meta_left;
/* Walk the type section from type_off to str_off, validating each
 * btf_type's metadata with btf_check_meta() and registering it with
 * btf_add_type().  Type ids are assigned in order starting at 1.
 * Loop-advance and error-return lines are elided in this excerpt.
 */
1705 static int btf_check_all_metas(struct btf_verifier_env *env)
1707 struct btf *btf = env->btf;
1708 struct btf_header *hdr;
1712 cur = btf->nohdr_data + hdr->type_off;
1713 end = btf->nohdr_data + hdr->str_off;
/* First type gets id 1; id 0 is reserved (void). */
1715 env->log_type_id = 1;
1717 struct btf_type *t = cur;
1720 meta_size = btf_check_meta(env, t, end - cur);
1724 btf_add_type(env, t);
1732 static int btf_resolve(struct btf_verifier_env *env,
1733 const struct btf_type *t, u32 type_id)
1735 const struct resolve_vertex *v;
1738 env->resolve_mode = RESOLVE_TBD;
1739 env_stack_push(env, t, type_id);
1740 while (!err && (v = env_stack_peak(env))) {
1741 env->log_type_id = v->type_id;
1742 err = btf_type_ops(v->t)->resolve(env, v);
1745 env->log_type_id = type_id;
1747 btf_verifier_log_type(env, t,
1748 "Exceeded max resolving depth:%u",
1750 else if (err == -EEXIST)
1751 btf_verifier_log_type(env, t, "Loop detected");
/* Sanity-check the post-resolution state of a type.  A struct must have
 * cleared resolved_ids/resolved_sizes entries; a modifier or pointer must
 * resolve to a non-modifier type; an array must resolve to a sized,
 * non-modifier element whose nelems * elem_size matches the recorded
 * resolved size.  Returns true when the state is consistent.
 */
1756 static bool btf_resolve_valid(struct btf_verifier_env *env,
1757 const struct btf_type *t,
1760 struct btf *btf = env->btf;
1762 if (!env_type_is_resolved(env, type_id))
1765 if (btf_type_is_struct(t))
1766 return !btf->resolved_ids[type_id] &&
1767 !btf->resolved_sizes[type_id];
1769 if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
1770 t = btf_type_id_resolve(btf, &type_id);
1771 return t && !btf_type_is_modifier(t);
1774 if (btf_type_is_array(t)) {
1775 const struct btf_array *array = btf_type_array(t);
1776 const struct btf_type *elem_type;
1777 u32 elem_type_id = array->type;
1780 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
/* Cross-check against the size cached by btf_array_resolve(). */
1781 return elem_type && !btf_type_is_modifier(elem_type) &&
1782 (array->nelems * elem_size ==
1783 btf->resolved_sizes[type_id]);
/* Second verification pass: resolve every type that needs it (ids 1..
 * nr_types), then verify each resolution with btf_resolve_valid().
 * Error-return lines are elided in this excerpt.
 */
1789 static int btf_check_all_types(struct btf_verifier_env *env)
1791 struct btf *btf = env->btf;
1795 err = env_resolve_init(env);
1800 for (type_id = 1; type_id <= btf->nr_types; type_id++) {
1801 const struct btf_type *t = btf_type_by_id(btf, type_id);
1803 env->log_type_id = type_id;
/* A type may already be resolved as a side effect of resolving an
 * earlier type that referenced it.
 */
1804 if (btf_type_needs_resolve(t) &&
1805 !env_type_is_resolved(env, type_id)) {
1806 err = btf_resolve(env, t, type_id);
1811 if (btf_type_needs_resolve(t) &&
1812 !btf_resolve_valid(env, t, type_id)) {
1813 btf_verifier_log_type(env, t, "Invalid resolve state");
/* Parse the type section: metadata pass first, then full resolution. */
1821 static int btf_parse_type_sec(struct btf_verifier_env *env)
1825 err = btf_check_all_metas(env);
1829 return btf_check_all_types(env);
/* Validate the string section: non-empty, within BTF_MAX_NAME_OFFSET,
 * and both the first and last byte must be '\0' (so every name offset
 * lands on a NUL-terminated string).  On success, publish the section
 * start as btf->strings.
 */
1832 static int btf_parse_str_sec(struct btf_verifier_env *env)
1834 const struct btf_header *hdr;
1835 struct btf *btf = env->btf;
1836 const char *start, *end;
1839 start = btf->nohdr_data + hdr->str_off;
1840 end = start + hdr->str_len;
1842 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
1843 start[0] || end[-1]) {
1844 btf_verifier_log(env, "Invalid string section");
1848 btf->strings = start;
/* Validate the fixed btf_header: magic, version, flags, and that the
 * type/string section offsets and lengths fit inside the remaining data
 * (meta_left) with the type section 4-byte aligned and placed before the
 * string section.  On success, nohdr_data points just past the header.
 * Error-return lines are elided in this excerpt.
 */
1853 static int btf_parse_hdr(struct btf_verifier_env *env)
1855 const struct btf_header *hdr;
1856 struct btf *btf = env->btf;
1859 if (btf->data_size < sizeof(*hdr)) {
1860 btf_verifier_log(env, "btf_header not found");
1864 btf_verifier_log_hdr(env);
1867 if (hdr->magic != BTF_MAGIC) {
1868 btf_verifier_log(env, "Invalid magic");
1872 if (hdr->version != BTF_VERSION) {
1873 btf_verifier_log(env, "Unsupported version");
1878 btf_verifier_log(env, "Unsupported flags");
1882 meta_left = btf->data_size - sizeof(*hdr);
1884 btf_verifier_log(env, "No data");
/* type_off/str_off are relative to the end of the header. */
1888 if (meta_left < hdr->type_off || hdr->str_off <= hdr->type_off ||
1889 /* Type section must align to 4 bytes */
1890 hdr->type_off & (sizeof(u32) - 1)) {
1891 btf_verifier_log(env, "Invalid type_off");
1895 if (meta_left < hdr->str_off ||
1896 meta_left - hdr->str_off < hdr->str_len) {
1897 btf_verifier_log(env, "Invalid str_off or str_len");
1901 btf->nohdr_data = btf->hdr + 1;
/* Top-level BTF loader: allocate the verifier env and btf object, set up
 * the optional user-supplied verifier log, copy the raw BTF blob from
 * userspace, then parse header, string section, and type section in
 * order.  Returns the new btf on success or ERR_PTR on failure; the env
 * is freed on both paths.  Several error-handling and cleanup lines are
 * elided in this excerpt.
 */
1906 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
1907 u32 log_level, char __user *log_ubuf, u32 log_size)
1909 struct btf_verifier_env *env = NULL;
1910 struct bpf_verifier_log *log;
1911 struct btf *btf = NULL;
1915 if (btf_data_size > BTF_MAX_SIZE)
1916 return ERR_PTR(-E2BIG);
1918 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
1920 return ERR_PTR(-ENOMEM);
1923 if (log_level || log_ubuf || log_size) {
1924 /* user requested verbose verifier output
1925 * and supplied buffer to store the verification trace
1927 log->level = log_level;
1928 log->ubuf = log_ubuf;
1929 log->len_total = log_size;
1931 /* log attributes have to be sane */
1932 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
1933 !log->level || !log->ubuf) {
1939 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
/* kvmalloc: the blob can be large, allow vmalloc fallback. */
1945 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
1952 btf->data_size = btf_data_size;
1954 if (copy_from_user(data, btf_data, btf_data_size)) {
1961 err = btf_parse_hdr(env);
1965 err = btf_parse_str_sec(env);
1969 err = btf_parse_type_sec(env);
/* A full log buffer is reported as an error even if parsing passed. */
1973 if (!err && log->level && bpf_verifier_log_full(log)) {
1979 btf_verifier_env_free(env);
1985 btf_verifier_env_free(env);
1988 return ERR_PTR(err);
/* Public entry point: pretty-print 'obj' as type 'type_id' into the
 * seq_file by dispatching to the type's kind-specific seq_show.
 */
1991 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
1994 const struct btf_type *t = btf_type_by_id(btf, type_id);
1996 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
/* File release hook for the btf anon inode: drop the btf refcount. */
1999 static int btf_release(struct inode *inode, struct file *filp)
2001 btf_put(filp->private_data);
/* File operations for BTF fds; also used to identify btf files in
 * btf_get_by_fd() by comparing f_op pointers.
 */
2005 const struct file_operations btf_fops = {
2006 .release = btf_release,
/* BPF_BTF_LOAD command: parse the user-supplied BTF blob (with optional
 * verifier log) and install the result behind a new read-only, CLOEXEC
 * anon-inode fd.  Returns the fd or a negative error.
 */
2009 int btf_new_fd(const union bpf_attr *attr)
2014 btf = btf_parse(u64_to_user_ptr(attr->btf),
2015 attr->btf_size, attr->btf_log_level,
2016 u64_to_user_ptr(attr->btf_log_buf),
2017 attr->btf_log_size);
2019 return PTR_ERR(btf);
2021 fd = anon_inode_getfd("btf", &btf_fops, btf,
2022 O_RDONLY | O_CLOEXEC);
/* Look up a struct btf from a user fd: the fd must exist and its file
 * must use btf_fops (i.e. was created by btf_new_fd).  Refcount/fdput
 * lines are elided in this excerpt.
 */
2029 struct btf *btf_get_by_fd(int fd)
2037 return ERR_PTR(-EBADF);
2039 if (f.file->f_op != &btf_fops) {
2041 return ERR_PTR(-EINVAL);
2044 btf = f.file->private_data;
2051 int btf_get_info_by_fd(const struct btf *btf,
2052 const union bpf_attr *attr,
2053 union bpf_attr __user *uattr)
2055 void __user *udata = u64_to_user_ptr(attr->info.info);
2056 u32 copy_len = min_t(u32, btf->data_size,
2057 attr->info.info_len);
2059 if (copy_to_user(udata, btf->data, copy_len) ||
2060 put_user(btf->data_size, &uattr->info.info_len))