2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 * Copyright (c) 2017 Facebook
6 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
14 #include <asm/types.h>
15 #include <linux/types.h>
28 #include <sys/capability.h>
30 #include <linux/unistd.h>
31 #include <linux/filter.h>
32 #include <linux/bpf_perf_event.h>
33 #include <linux/bpf.h>
34 #include <linux/if_ether.h>
35 #include <linux/btf.h>
38 #include <bpf/libbpf.h>
41 # include "autoconf.h"
43 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
44 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
47 #include "bpf_rlimit.h"
50 #include "../../../include/linux/filter.h"
52 #define MAX_INSNS BPF_MAXINSNS
53 #define MAX_TEST_INSNS 1000000
55 #define MAX_NR_MAPS 14
56 #define MAX_TEST_RUNS 8
57 #define POINTER_VALUE 0xcafe4all
58 #define TEST_DATA_LEN 64
60 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
61 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
63 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
64 static bool unpriv_disabled = false;
/* Interior of struct bpf_test (declaration opens above this view).
 * One instance describes a single verifier test case: the program to
 * load, map-fixup locations, expected verifier outcome, and expected
 * runtime return values.
 */
/* Static test program; unused when fill_helper generates one instead. */
69 struct bpf_insn insns[MAX_INSNS];
/* Dynamically generated program buffer, allocated in do_test_fixup(). */
70 struct bpf_insn *fill_insns;
/* Each fixup_* array is a zero-terminated list of instruction indices
 * whose imm field must be patched with the fd of a freshly created map
 * of the corresponding type (see do_test_fixup()).
 */
71 int fixup_map_hash_8b[MAX_FIXUPS];
72 int fixup_map_hash_48b[MAX_FIXUPS];
73 int fixup_map_hash_16b[MAX_FIXUPS];
74 int fixup_map_array_48b[MAX_FIXUPS];
75 int fixup_map_sockmap[MAX_FIXUPS];
76 int fixup_map_sockhash[MAX_FIXUPS];
77 int fixup_map_xskmap[MAX_FIXUPS];
78 int fixup_map_stacktrace[MAX_FIXUPS];
79 int fixup_prog1[MAX_FIXUPS];
80 int fixup_prog2[MAX_FIXUPS];
81 int fixup_map_in_map[MAX_FIXUPS];
82 int fixup_cgroup_storage[MAX_FIXUPS];
83 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
84 int fixup_map_spin_lock[MAX_FIXUPS];
/* Expected verifier log substring when run unprivileged. */
86 const char *errstr_unpriv;
/* Expected prog return values and expected "processed N insns" count. */
87 uint32_t retval, retval_unpriv, insn_processed;
93 } result, result_unpriv;
94 enum bpf_prog_type prog_type;
/* Default input packet for single-run tests. */
96 __u8 data[TEST_DATA_LEN];
/* Optional generator that fills fill_insns for this test. */
97 void (*fill_helper)(struct bpf_test *self);
/* Per-run expectations for multi-run tests (up to MAX_TEST_RUNS). */
100 uint32_t retval, retval_unpriv;
102 __u8 data[TEST_DATA_LEN];
103 __u64 data64[TEST_DATA_LEN / 8];
105 } retvals[MAX_TEST_RUNS];
108 /* Note we want this to be 64 bit aligned so that the end of our array is
109 * actually the end of the structure.
111 #define MAX_ENTRIES 11
115 int foo[MAX_ENTRIES];
/* Fill helper: generate a large program of interleaved ld_abs and
 * skb_vlan_push/pop helper calls to stress the verifier's handling of
 * ld_abs with surrounding clobbering calls near the 16-bit jump limit.
 * Writes into self->fill_insns and sets self->prog_len.
 */
123 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
125 /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
127 /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
128 unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
129 struct bpf_insn *insn = self->fill_insns;
/* Save ctx (r1) in r6: ld_abs and helper calls clobber r1-r5. */
132 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
134 for (j = 0; j < PUSH_CNT; j++) {
135 insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* Bail to the common exit if byte 0 != 0x34 (IP ethertype high byte). */
136 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
138 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
139 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
140 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
141 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
142 BPF_FUNC_skb_vlan_push),
143 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
/* Second phase: same pattern but popping the VLAN tags back off. */
147 for (j = 0; j < PUSH_CNT; j++) {
148 insn[i++] = BPF_LD_ABS(BPF_B, 0);
149 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
151 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
152 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
153 BPF_FUNC_skb_vlan_pop),
154 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
/* Pad the rest with cheap movs so every branch target up to len-2 is
 * a valid instruction, then terminate with a single exit.
 */
160 for (; i < len - 1; i++)
161 insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
162 insn[len - 1] = BPF_EXIT_INSN();
163 self->prog_len = len;
/* Fill helper: one forward jump over a long run of ld_abs instructions,
 * exercising branch fixup when each ld_abs is internally expanded.
 * Writes into self->fill_insns and sets self->prog_len.
 */
166 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
168 struct bpf_insn *insn = self->fill_insns;
169 /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns */
170 unsigned int len = (1 << 15) / 6;
/* Preserve ctx in r6 before ld_abs clobbers r1-r5. */
173 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
174 insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* Jump over the filler to the exit when byte 0 == 10. */
175 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
178 insn[i++] = BPF_LD_ABS(BPF_B, 1);
179 insn[i] = BPF_EXIT_INSN();
180 self->prog_len = i + 1;
/* Fill helper: build a program of pseudo-random BPF_LD_IMM64 loads
 * XORed into r0, then fold the high 32 bits into the low half so the
 * 32-bit program return value checksums all generated immediates.
 * self->retval is the requested instruction count on entry and is
 * overwritten with the expected checksum on exit.
 */
183 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
185 struct bpf_insn *insn = self->fill_insns;
189 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
/* self->retval carries the target length at this point (see header). */
190 while (i < self->retval) {
191 uint64_t val = bpf_semi_rand_get();
192 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
197 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
/* Fold upper 32 bits of the accumulator into the lower 32. */
199 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
200 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
201 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
202 insn[i] = BPF_EXIT_INSN();
203 self->prog_len = i + 1;
/* Expected retval: same fold computed host-side over the same values. */
205 self->retval = (uint32_t)res;
208 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
/* Zero-initializes a struct bpf_sock_tuple at fp[-48..-8] on the BPF
 * stack, then calls the given sk_lookup helper with r2 pointing at it.
 * NOTE(review): first store is BPF_W, the rest BPF_DW — presumably
 * sized to cover sizeof(struct bpf_sock_tuple) exactly; confirm against
 * the UAPI definition.
 */
209 #define BPF_SK_LOOKUP(func) \
210 /* struct bpf_sock_tuple tuple = {} */ \
211 BPF_MOV64_IMM(BPF_REG_2, 0), \
212 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
213 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
214 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
215 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
216 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
217 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
218 /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
219 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
221 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
222 BPF_MOV64_IMM(BPF_REG_4, 0), \
223 BPF_MOV64_IMM(BPF_REG_5, 0), \
224 BPF_EMIT_CALL(BPF_FUNC_ ## func)
226 /* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
227 * value into 0 and does necessary preparation for direct packet access
228 * through r2. The allowed access range is 8 bytes.
 */
/* Loads skb->data into r2 and skb->data_end into r3, then verifies
 * data + 8 <= data_end before falling through to the user's code.
 */
230 #define BPF_DIRECT_PKT_R2 \
231 BPF_MOV64_IMM(BPF_REG_0, 0), \
232 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
233 offsetof(struct __sk_buff, data)), \
234 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
235 offsetof(struct __sk_buff, data_end)), \
236 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
238 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
241 /* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 into a random
242 * positive u32, and zero-extend it into 64-bit.
 */
/* Shift left then logical-shift right by 33 clears bit 31 and all
 * upper bits, guaranteeing a non-negative 31-bit value in r7.
 */
244 #define BPF_RAND_UEXT_R7 \
245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
246 BPF_FUNC_get_prandom_u32), \
247 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
248 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
249 BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
251 /* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 into a random
252 * negative u32, and sign-extend it into 64-bit.
 */
/* OR sets bit 31, then LSH/ARSH by 32 replicates it through the upper
 * word, yielding a sign-extended negative 64-bit value in r7.
 */
254 #define BPF_RAND_SEXT_R7 \
255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
256 BPF_FUNC_get_prandom_u32), \
257 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
258 BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
259 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
260 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
/* The test table itself is generated: every file under verifier/ is
 * stitched into tests.h by the Makefile and expanded here.
 */
262 static struct bpf_test tests[] = {
264 #include <verifier/tests.h>
/* Return the effective length of a statically-defined test program by
 * scanning backwards from MAX_INSNS for the last non-zero instruction.
 */
268 static int probe_filter_length(const struct bpf_insn *fp)
272 for (len = MAX_INSNS - 1; len > 0; --len)
273 if (fp[len].code != 0 || fp[len].imm != 0)
/* True (and prints a SKIP notice) when the running kernel does not
 * support the given map type, so callers can skip instead of fail.
 */
278 static bool skip_unsupported_map(enum bpf_map_type map_type)
280 if (!bpf_probe_map_type(map_type, 0)) {
281 printf("SKIP (unsupported map type %d)\n", map_type);
/* Create a test map of the given type/geometry; returns the map fd.
 * Hash maps are created with BPF_F_NO_PREALLOC.
 * NOTE(review): the failure message says "hash map" regardless of the
 * requested type — misleading for array/sockmap/etc. callers; worth
 * rewording upstream.
 */
288 static int create_map(uint32_t type, uint32_t size_key,
289 uint32_t size_value, uint32_t max_elem)
293 fd = bpf_create_map(type, size_key, size_value, max_elem,
294 type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
296 if (skip_unsupported_map(type))
298 printf("Failed to create hash map '%s'!\n", strerror(errno));
/* Seed element `index` of a struct test_val map with a known pattern
 * (index = 28, foo[6] = 0xabcdef12) that tests can read back.
 */
304 static void update_map(int fd, int index)
306 struct test_val value = {
307 .index = (6 + 1) * sizeof(int),
308 .foo[6] = 0xabcdef12,
/* Test harness: a failed map update is a fatal setup error. */
311 assert(!bpf_map_update_elem(fd, &index, &value, 0));
/* Load a trivial program (returns 42) used as a tail-call target;
 * returns its prog fd or a negative error from bpf_load_program().
 */
314 static int create_prog_dummy1(enum bpf_prog_type prog_type)
316 struct bpf_insn prog[] = {
317 BPF_MOV64_IMM(BPF_REG_0, 42),
321 return bpf_load_program(prog_type, prog,
322 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Load a program that tail-calls slot `idx` of prog array `mfd` and
 * returns 41 if the tail call falls through; returns the prog fd.
 */
325 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
327 struct bpf_insn prog[] = {
328 BPF_MOV64_IMM(BPF_REG_3, idx),
329 BPF_LD_MAP_FD(BPF_REG_2, mfd),
330 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
/* Only reached when the tail call did not transfer control. */
332 BPF_MOV64_IMM(BPF_REG_0, 41),
336 return bpf_load_program(prog_type, prog,
337 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Create a BPF_MAP_TYPE_PROG_ARRAY of max_elem slots and populate two
 * entries with the dummy programs above; returns the map fd.
 */
340 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
346 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
347 sizeof(int), max_elem, 0);
349 if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
351 printf("Failed to create prog array '%s'!\n", strerror(errno));
/* dummy2 chains back into this same array at p2key. */
355 p1fd = create_prog_dummy1(prog_type);
356 p2fd = create_prog_dummy2(prog_type, mfd, p2key);
357 if (p1fd < 0 || p2fd < 0)
359 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
361 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
/* Create a one-element ARRAY_OF_MAPS whose inner type is a plain array
 * map; returns the outer map fd (or -1 with a SKIP/FAIL message).
 */
374 static int create_map_in_map(void)
376 int inner_map_fd, outer_map_fd;
378 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
380 if (inner_map_fd < 0) {
381 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
383 printf("Failed to create array '%s'!\n", strerror(errno));
/* inner_map_fd only supplies the inner map's type template here. */
387 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
388 sizeof(int), inner_map_fd, 1, 0);
389 if (outer_map_fd < 0) {
390 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
392 printf("Failed to create array of maps '%s'!\n",
/* Create a (per-cpu) cgroup storage map with TEST_DATA_LEN-byte values;
 * returns the map fd or -1 after printing SKIP/FAIL.
 */
401 static int create_cgroup_storage(bool percpu)
403 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
404 BPF_MAP_TYPE_CGROUP_STORAGE;
/* max_entries must be 0 for cgroup storage maps. */
407 fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
408 TEST_DATA_LEN, 0, 0);
410 if (skip_unsupported_map(type))
412 printf("Failed to create cgroup storage '%s'!\n",
/* Minimal BTF raw-encoding helpers, mirroring the kernel's internal
 * BTF test macros: they emit the __u32 words of struct btf_type and
 * member records directly into an array initializer.
 */
419 #define BTF_INFO_ENC(kind, kind_flag, vlen) \
420 ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
421 #define BTF_TYPE_ENC(name, info, size_or_type) \
422 (name), (info), (size_or_type)
423 #define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
424 ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
425 #define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
426 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
427 BTF_INT_ENC(encoding, bits_offset, bits)
428 #define BTF_MEMBER_ENC(name, type, bits_offset) \
429 (name), (type), (bits_offset)
431 struct btf_raw_data {
437 /* struct bpf_spin_lock {
442 * struct bpf_spin_lock l;
 */
/* String section offsets: 1="bpf_spin_lock", 15="val", 19="cnt", 23="l". */
445 static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
446 static __u32 btf_raw_types[] = {
448 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
449 /* struct bpf_spin_lock */ /* [2] */
450 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
451 BTF_MEMBER_ENC(15, 1, 0), /* int val; */
452 /* struct val */ /* [3] */
453 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
454 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
455 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
/* Assemble a raw BTF blob (header + type section + string section) in
 * one malloc'd buffer and load it; returns the BTF fd from the kernel.
 */
458 static int load_btf(void)
460 struct btf_header hdr = {
462 .version = BTF_VERSION,
463 .hdr_len = sizeof(struct btf_header),
464 .type_len = sizeof(btf_raw_types),
/* String section starts immediately after the type section. */
465 .str_off = sizeof(btf_raw_types),
466 .str_len = sizeof(btf_str_sec),
471 ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
472 sizeof(btf_str_sec));
/* Pack the three sections back to back, advancing ptr past each. */
474 memcpy(ptr, &hdr, sizeof(hdr));
476 memcpy(ptr, btf_raw_types, hdr.type_len);
478 memcpy(ptr, btf_str_sec, hdr.str_len);
/* ptr - raw_btf is the total blob size after the three copies. */
481 btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
/* Create a one-element array map whose BTF value type is struct val
 * (containing a bpf_spin_lock) so spin-lock tests can run against it.
 * Returns the map fd, or -1 if the BTF or the map could not be created.
 */
488 static int create_map_spin_lock(void)
490 struct bpf_create_map_attr attr = {
492 .map_type = BPF_MAP_TYPE_ARRAY,
/* BTF ids refer into btf_raw_types above: [1]=int key, [3]=struct val. */
496 .btf_key_type_id = 1,
497 .btf_value_type_id = 3,
504 attr.btf_fd = btf_fd;
505 fd = bpf_create_map_xattr(&attr);
507 printf("Failed to create map with spin_lock\n");
/* Shared verifier log buffer (~16 MiB) reused across all test loads. */
511 static char bpf_vlog[UINT_MAX >> 8];
/* Pre-load fixup pass for one test: run its fill helper (if any) and,
 * for every non-empty fixup_* list, create the corresponding map once
 * and patch its fd into the imm field of each listed instruction
 * (walking each zero-terminated index list). map_fds[] slot assignment
 * is fixed per map kind so do_test_single() can close them afterwards.
 */
513 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
514 struct bpf_insn *prog, int *map_fds)
516 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
517 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
518 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
519 int *fixup_map_array_48b = test->fixup_map_array_48b;
520 int *fixup_map_sockmap = test->fixup_map_sockmap;
521 int *fixup_map_sockhash = test->fixup_map_sockhash;
522 int *fixup_map_xskmap = test->fixup_map_xskmap;
523 int *fixup_map_stacktrace = test->fixup_map_stacktrace;
524 int *fixup_prog1 = test->fixup_prog1;
525 int *fixup_prog2 = test->fixup_prog2;
526 int *fixup_map_in_map = test->fixup_map_in_map;
527 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
528 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
529 int *fixup_map_spin_lock = test->fixup_map_spin_lock;
/* Generated programs get a dedicated MAX_TEST_INSNS buffer, freed in
 * do_test_single() after the run.
 */
531 if (test->fill_helper) {
532 test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
533 test->fill_helper(test);
536 /* Allocating HTs with 1 elem is fine here, since we only test
537 * for verifier and not do a runtime lookup, so the only thing
538 * that really matters is value size in this case.
 */
540 if (*fixup_map_hash_8b) {
541 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
542 sizeof(long long), 1);
544 prog[*fixup_map_hash_8b].imm = map_fds[0];
546 } while (*fixup_map_hash_8b);
549 if (*fixup_map_hash_48b) {
550 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
551 sizeof(struct test_val), 1);
553 prog[*fixup_map_hash_48b].imm = map_fds[1];
554 fixup_map_hash_48b++;
555 } while (*fixup_map_hash_48b);
558 if (*fixup_map_hash_16b) {
559 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
560 sizeof(struct other_val), 1);
562 prog[*fixup_map_hash_16b].imm = map_fds[2];
563 fixup_map_hash_16b++;
564 } while (*fixup_map_hash_16b);
567 if (*fixup_map_array_48b) {
568 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
569 sizeof(struct test_val), 1);
/* Array maps get element 0 seeded so value reads are meaningful. */
570 update_map(map_fds[3], 0);
572 prog[*fixup_map_array_48b].imm = map_fds[3];
573 fixup_map_array_48b++;
574 } while (*fixup_map_array_48b);
/* Two prog arrays with different geometry/population for tail calls. */
578 map_fds[4] = create_prog_array(prog_type, 4, 0);
580 prog[*fixup_prog1].imm = map_fds[4];
582 } while (*fixup_prog1);
586 map_fds[5] = create_prog_array(prog_type, 8, 7);
588 prog[*fixup_prog2].imm = map_fds[5];
590 } while (*fixup_prog2);
593 if (*fixup_map_in_map) {
594 map_fds[6] = create_map_in_map();
596 prog[*fixup_map_in_map].imm = map_fds[6];
598 } while (*fixup_map_in_map);
601 if (*fixup_cgroup_storage) {
602 map_fds[7] = create_cgroup_storage(false);
604 prog[*fixup_cgroup_storage].imm = map_fds[7];
605 fixup_cgroup_storage++;
606 } while (*fixup_cgroup_storage);
609 if (*fixup_percpu_cgroup_storage) {
610 map_fds[8] = create_cgroup_storage(true);
612 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
613 fixup_percpu_cgroup_storage++;
614 } while (*fixup_percpu_cgroup_storage);
616 if (*fixup_map_sockmap) {
617 map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
620 prog[*fixup_map_sockmap].imm = map_fds[9];
622 } while (*fixup_map_sockmap);
624 if (*fixup_map_sockhash) {
625 map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
628 prog[*fixup_map_sockhash].imm = map_fds[10];
629 fixup_map_sockhash++;
630 } while (*fixup_map_sockhash);
632 if (*fixup_map_xskmap) {
633 map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
636 prog[*fixup_map_xskmap].imm = map_fds[11];
638 } while (*fixup_map_xskmap);
640 if (*fixup_map_stacktrace) {
641 map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
644 prog[*fixup_map_stacktrace].imm = map_fds[12];
645 fixup_map_stacktrace++;
646 } while (*fixup_map_stacktrace);
648 if (*fixup_map_spin_lock) {
649 map_fds[13] = create_map_spin_lock();
651 prog[*fixup_map_spin_lock].imm = map_fds[13];
652 fixup_map_spin_lock++;
653 } while (*fixup_map_spin_lock);
/* Raise (admin=true) or drop (admin=false) the effective CAP_SYS_ADMIN
 * capability of the current process via libcap; returns 0 on success.
 * Used to run each test both privileged and unprivileged.
 */
657 static int set_admin(bool admin)
660 const cap_value_t cap_val = CAP_SYS_ADMIN;
663 caps = cap_get_proc();
665 perror("cap_get_proc");
668 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
669 admin ? CAP_SET : CAP_CLEAR)) {
670 perror("cap_set_flag");
673 if (cap_set_proc(caps)) {
674 perror("cap_set_proc");
/* Execute a loaded program once via BPF_PROG_TEST_RUN and compare its
 * return value against expected_val. ENOTSUPP/EPERM from the kernel
 * are tolerated (prog type not runnable / unprivileged); returns
 * nonzero on unexpected error or retval mismatch.
 */
684 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
685 void *data, size_t size_data)
687 __u8 tmp[TEST_DATA_LEN << 2];
688 __u32 size_tmp = sizeof(tmp);
694 err = bpf_prog_test_run(fd_prog, 1, data, size_data,
695 tmp, &size_tmp, &retval, NULL);
/* 524 is kernel-internal ENOTSUPP (not in userspace errno.h). */
698 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
699 printf("Unexpected bpf_prog_test_run error ");
/* POINTER_VALUE retvals are kernel pointers and can't be compared. */
702 if (!err && retval != expected_val &&
703 expected_val != POINTER_VALUE) {
704 printf("FAIL retval %d != %d ", retval, expected_val);
/* Run one test case in the current privilege mode: fix up maps, load
 * the program through the verifier, check accept/reject and the
 * verifier log against expectations, optionally test-run the program,
 * and update the pass/error counters.
 */
711 static void do_test_single(struct bpf_test *test, bool unpriv,
712 int *passes, int *errors)
714 int fd_prog, expected_ret, alignment_prevented_execution;
715 int prog_len, prog_type = test->prog_type;
716 struct bpf_insn *prog = test->insns;
717 int run_errs, run_successes;
718 int map_fds[MAX_NR_MAPS];
719 const char *expected_err;
724 for (i = 0; i < MAX_NR_MAPS; i++)
/* Tests with no explicit type default to socket filter. */
728 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
730 do_test_fixup(test, prog_type, prog, map_fds);
731 if (test->fill_insns) {
732 prog = test->fill_insns;
733 prog_len = test->prog_len;
735 prog_len = probe_filter_length(prog);
737 /* If there were some map skips during fixup due to missing bpf
738 * features, skip this test.
 */
740 if (fixup_skips != skips)
744 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
745 pflags |= BPF_F_STRICT_ALIGNMENT;
746 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
747 pflags |= BPF_F_ANY_ALIGNMENT;
/* Log level 4 captures the verifier trace into bpf_vlog. */
748 fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
749 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
750 if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
751 printf("SKIP (unsupported program type %d)\n", prog_type);
/* Unprivileged expectations override the defaults when provided. */
756 expected_ret = unpriv && test->result_unpriv != UNDEF ?
757 test->result_unpriv : test->result;
758 expected_err = unpriv && test->errstr_unpriv ?
759 test->errstr_unpriv : test->errstr;
761 alignment_prevented_execution = 0;
763 if (expected_ret == ACCEPT) {
765 printf("FAIL\nFailed to load prog '%s'!\n",
/* On hosts without efficient unaligned access, such programs load
 * (BPF_F_ANY_ALIGNMENT) but are not executed.
 */
769 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
771 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
772 alignment_prevented_execution = 1;
776 printf("FAIL\nUnexpected success to load!\n");
/* Expected rejection: the log must contain the expected substring. */
779 if (!strstr(bpf_vlog, expected_err)) {
780 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
781 expected_err, bpf_vlog);
/* Optionally verify the verifier's "processed N insns" counter. */
786 if (test->insn_processed) {
787 uint32_t insn_processed;
790 proc = strstr(bpf_vlog, "processed ");
791 insn_processed = atoi(proc + 10);
792 if (test->insn_processed != insn_processed) {
793 printf("FAIL\nUnexpected insn_processed %u vs %u\n",
794 insn_processed, test->insn_processed);
/* Runtime phase: single default run, or test->runs explicit runs. */
801 if (!alignment_prevented_execution && fd_prog >= 0) {
802 uint32_t expected_val;
806 expected_val = unpriv && test->retval_unpriv ?
807 test->retval_unpriv : test->retval;
809 err = do_prog_test_run(fd_prog, unpriv, expected_val,
810 test->data, sizeof(test->data));
817 for (i = 0; i < test->runs; i++) {
818 if (unpriv && test->retvals[i].retval_unpriv)
819 expected_val = test->retvals[i].retval_unpriv;
821 expected_val = test->retvals[i].retval;
823 err = do_prog_test_run(fd_prog, unpriv, expected_val,
824 test->retvals[i].data,
825 sizeof(test->retvals[i].data));
827 printf("(run %d/%d) ", i + 1, test->runs);
837 if (run_successes > 1)
838 printf("%d cases ", run_successes);
840 if (alignment_prevented_execution)
841 printf(" (NOTE: not executed due to unknown alignment)");
/* Cleanup: release the generated insn buffer and all fixup maps. */
848 if (test->fill_insns)
849 free(test->fill_insns);
851 for (i = 0; i < MAX_NR_MAPS; i++)
857 printf("%s", bpf_vlog);
/* True when the process currently has effective CAP_SYS_ADMIN,
 * queried through libcap.
 * NOTE(review): the support probe checks CAP_SETFCAP but reports the
 * failure as "cap_get_flag" — looks like a copy-paste oddity; confirm
 * intent upstream before changing.
 */
861 static bool is_admin(void)
864 cap_flag_value_t sysadmin = CAP_CLEAR;
865 const cap_value_t cap_val = CAP_SYS_ADMIN;
867 #ifdef CAP_IS_SUPPORTED
868 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
869 perror("cap_get_flag");
873 caps = cap_get_proc();
875 perror("cap_get_proc");
878 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
879 perror("cap_get_flag");
882 return (sysadmin == CAP_SET);
/* Read /proc/sys/kernel/unprivileged_bpf_disabled and set the global
 * unpriv_disabled flag. A missing sysctl file is treated as disabled
 * (conservative: skip unprivileged tests).
 */
885 static void get_unpriv_disabled()
890 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
892 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
893 unpriv_disabled = true;
/* fgets with size 2 reads a single character: "0" or "1"/"2". */
896 if (fgets(buf, 2, fd) == buf && atoi(buf))
897 unpriv_disabled = true;
/* True if this test's program type can be loaded by an unprivileged
 * user (socket filter and cgroup skb, or unspecified which defaults
 * to socket filter), i.e. the test should also run without root.
 */
901 static bool test_as_unpriv(struct bpf_test *test)
903 return !test->prog_type ||
904 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
905 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
/* Run tests[from..to) — each eligible test unprivileged ("/u") and all
 * tests privileged ("/p") — print a summary, and return EXIT_FAILURE
 * if any test failed.
 */
908 static int do_test(bool unpriv, unsigned int from, unsigned int to)
910 int i, passes = 0, errors = 0;
912 for (i = from; i < to; i++) {
913 struct bpf_test *test = &tests[i];
915 /* Program types that are not supported by non-root we
 */
918 if (test_as_unpriv(test) && unpriv_disabled) {
919 printf("#%d/u %s SKIP\n", i, test->descr);
921 } else if (test_as_unpriv(test)) {
/* Capabilities are dropped/restored around the unpriv run
 * via set_admin() (call sites elided in this view).
 */
924 printf("#%d/u %s ", i, test->descr);
925 do_test_single(test, true, &passes, &errors);
931 printf("#%d/p %s SKIP\n", i, test->descr);
934 printf("#%d/p %s ", i, test->descr);
935 do_test_single(test, false, &passes, &errors);
939 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
941 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
/* Entry point. Optional argv: two numbers select an inclusive test
 * range, one number selects a single test. Refuses to run as an
 * unprivileged user when the unprivileged-bpf sysctl disables it.
 */
944 int main(int argc, char **argv)
946 unsigned int from = 0, to = ARRAY_SIZE(tests);
947 bool unpriv = !is_admin();
950 unsigned int l = atoi(argv[argc - 2]);
951 unsigned int u = atoi(argv[argc - 1]);
/* Both bounds must be valid indices; otherwise fall back to all. */
953 if (l < to && u < to) {
957 } else if (argc == 2) {
958 unsigned int t = atoi(argv[argc - 1]);
966 get_unpriv_disabled();
967 if (unpriv && unpriv_disabled) {
968 printf("Cannot run as unprivileged user with sysctl %s.\n",
/* Seed the deterministic PRNG used by bpf_fill_rand_ld_dw(). */
973 bpf_semi_rand_init();
974 return do_test(unpriv, from, to);