2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 * Copyright (c) 2017 Facebook
6 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
14 #include <asm/types.h>
15 #include <linux/types.h>
28 #include <sys/capability.h>
30 #include <linux/unistd.h>
31 #include <linux/filter.h>
32 #include <linux/bpf_perf_event.h>
33 #include <linux/bpf.h>
34 #include <linux/if_ether.h>
35 #include <linux/btf.h>
38 #include <bpf/libbpf.h>
/* Local test-harness headers and build configuration.
 * NOTE(review): this excerpt is line-sampled; the #ifdef/#else/#endif lines
 * that bracket the autoconf.h include and the fallback arch check appear to
 * be missing here.
 */
41 # include "autoconf.h"
/* Fallback when autoconf.h is unavailable: assume efficient unaligned
 * access on these architectures.
 */
43 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
44 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
47 #include "bpf_rlimit.h"
/* Pull in the kernel's insn macros (BPF_MOV64_REG etc.) directly. */
50 #include "../../../include/linux/filter.h"
/* Global limits and per-test flag bits for the verifier test harness. */
52 #define MAX_INSNS BPF_MAXINSNS
54 #define MAX_NR_MAPS 14
55 #define MAX_TEST_RUNS 8
/* Not a typo: lexes as hex 0xcafe4a with an "ll" (long long) suffix. */
56 #define POINTER_VALUE 0xcafe4all
57 #define TEST_DATA_LEN 64
/* Bits for struct bpf_test::flags, consumed in do_test_single() to select
 * BPF_F_ANY_ALIGNMENT / BPF_F_STRICT_ALIGNMENT load flags.
 */
59 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
60 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
/* Sysctl (relative to /proc/sys/) probed by get_unpriv_disabled(). */
62 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
/* Set once at startup by get_unpriv_disabled(); true when unprivileged
 * bpf() is disabled on this system.
 */
63 static bool unpriv_disabled = false;
/* Interior of struct bpf_test (the struct header and a few fields such as
 * descr/errstr are outside this excerpt — sampled out).
 * Each fixup_* array lists instruction indices whose .imm field
 * do_test_fixup() patches with the fd of a freshly created map/prog-array
 * of the corresponding type; the lists are zero-terminated (the fixup loops
 * stop at a zero entry).
 */
68 struct bpf_insn insns[MAX_INSNS];
69 int fixup_map_hash_8b[MAX_FIXUPS];
70 int fixup_map_hash_48b[MAX_FIXUPS];
71 int fixup_map_hash_16b[MAX_FIXUPS];
72 int fixup_map_array_48b[MAX_FIXUPS];
73 int fixup_map_sockmap[MAX_FIXUPS];
74 int fixup_map_sockhash[MAX_FIXUPS];
75 int fixup_map_xskmap[MAX_FIXUPS];
76 int fixup_map_stacktrace[MAX_FIXUPS];
77 int fixup_prog1[MAX_FIXUPS];
78 int fixup_prog2[MAX_FIXUPS];
79 int fixup_map_in_map[MAX_FIXUPS];
80 int fixup_cgroup_storage[MAX_FIXUPS];
81 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
82 int fixup_map_spin_lock[MAX_FIXUPS];
/* Expected verifier error substring when run without CAP_SYS_ADMIN. */
84 const char *errstr_unpriv;
/* Expected test-run return values and expected "processed N insns" count
 * (insn_processed == 0 means "don't check").
 */
85 uint32_t retval, retval_unpriv, insn_processed;
/* Expected verdicts (ACCEPT/REJECT/UNDEF enum — enumerators sampled out)
 * for privileged and unprivileged runs.
 */
90 } result, result_unpriv;
91 enum bpf_prog_type prog_type;
/* Default input packet handed to bpf_prog_test_run(). */
93 __u8 data[TEST_DATA_LEN];
/* Optional callback that synthesizes self->insns at fixup time. */
94 void (*fill_helper)(struct bpf_test *self);
/* Per-run expectations when a test declares multiple runs (see the
 * retvals[] loop in do_test_single()).
 */
97 uint32_t retval, retval_unpriv;
99 __u8 data[TEST_DATA_LEN];
100 __u64 data64[TEST_DATA_LEN / 8];
102 } retvals[MAX_TEST_RUNS];
105 /* Note we want this to be 64 bit aligned so that the end of our array is
106 * actually the end of the structure.
 */
108 #define MAX_ENTRIES 11
/* Field of struct test_val (struct header sampled out of this excerpt);
 * update_map() fills .index and .foo[6] of this struct.
 */
112 int foo[MAX_ENTRIES];
/* fill_helper: build a BPF_MAXINSNS-long program of LD_ABS + skb_vlan_push
 * pairs followed by LD_ABS + skb_vlan_pop pairs, padded with MOV r0 insns
 * and terminated by EXIT.  Each JNE jumps over the remaining body straight
 * to the final EXIT on mismatch.
 * NOTE(review): the declarations of i/j/PUSH_CNT and several closing braces
 * are missing from this excerpt.
 */
120 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
122 /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
124 unsigned int len = BPF_MAXINSNS;
125 struct bpf_insn *insn = self->insns;
/* r6 = ctx (r1 is clobbered by LD_ABS and helper calls). */
128 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
130 for (j = 0; j < PUSH_CNT; j++) {
131 insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* Bail to the trailing EXIT if first payload byte != 0x34. */
132 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
134 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
135 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
136 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
137 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
138 BPF_FUNC_skb_vlan_push),
139 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
/* Mirror loop: pop the VLANs pushed above. */
143 for (j = 0; j < PUSH_CNT; j++) {
144 insn[i++] = BPF_LD_ABS(BPF_B, 0);
145 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
147 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
148 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
149 BPF_FUNC_skb_vlan_pop),
150 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
/* Pad to full length with r0 = 0xbef, then the shared EXIT. */
156 for (; i < len - 1; i++)
157 insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
158 insn[len - 1] = BPF_EXIT_INSN();
/* fill_helper: a jump whose target lands just past an LD_ABS, exercising
 * the verifier's handling of jumps around LD_ABS.
 * NOTE(review): opening brace, loop counter declaration, the padding loop
 * body and closing braces are missing from this excerpt.
 */
161 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
163 struct bpf_insn *insn = self->insns;
164 unsigned int len = BPF_MAXINSNS;
167 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
168 insn[i++] = BPF_LD_ABS(BPF_B, 0);
/* If byte == 10, jump over the filler to the second LD_ABS below. */
169 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
172 insn[i++] = BPF_LD_ABS(BPF_B, 1);
173 insn[i] = BPF_EXIT_INSN();
/* fill_helper: emit pseudo-random BPF_LD_IMM64 loads XORed into r0, then
 * fold the high 32 bits into the low 32, so the program's return value is
 * a checksum of all emitted constants.  self->retval carries the target
 * instruction count in and the expected checksum out.
 * NOTE(review): the declarations of i/res, the copy of tmp[] into insn[],
 * and the running res ^= val update are missing from this excerpt.
 */
176 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
178 struct bpf_insn *insn = self->insns;
182 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
183 while (i < self->retval) {
184 uint64_t val = bpf_semi_rand_get();
/* BPF_LD_IMM64 expands to two insns; stage them in tmp[]. */
185 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
190 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
/* r0 ^= (r0 >> 32): collapse the 64-bit checksum into 32 bits. */
192 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
193 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
194 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
195 insn[i] = BPF_EXIT_INSN();
/* Expected program return value = low 32 bits of the host-side checksum. */
197 self->retval = (uint32_t)res;
/* Zero a 48-byte bpf_sock_tuple on the stack and call the given sk_lookup
 * helper with (ctx, &tuple, sizeof(tuple), 0, 0).
 */
200 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
201 #define BPF_SK_LOOKUP(func) \
202 /* struct bpf_sock_tuple tuple = {} */ \
203 BPF_MOV64_IMM(BPF_REG_2, 0), \
204 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
205 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
206 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
207 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
208 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
209 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
210 /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
211 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
213 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
214 BPF_MOV64_IMM(BPF_REG_4, 0), \
215 BPF_MOV64_IMM(BPF_REG_5, 0), \
216 BPF_EMIT_CALL(BPF_FUNC_ ## func)
218 /* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
219 * value into 0 and does necessary preparation for direct packet access
220 * through r2. The allowed access range is 8 bytes.
 */
/* NOTE(review): the final two instructions of this macro (the out-of-range
 * bail-out path) are missing from this excerpt.
 */
222 #define BPF_DIRECT_PKT_R2 \
223 BPF_MOV64_IMM(BPF_REG_0, 0), \
224 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
225 offsetof(struct __sk_buff, data)), \
226 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
227 offsetof(struct __sk_buff, data_end)), \
228 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
230 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
233 /* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 into a random
234 * positive u32, and zero-extend it into 64-bit.
 */
236 #define BPF_RAND_UEXT_R7 \
237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
238 BPF_FUNC_get_prandom_u32), \
239 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
/* Shift by 33 clears bit 31 too, guaranteeing a positive value. */ \
240 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
241 BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
243 /* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 into a random
244 * negative u32, and sign-extend it into 64-bit.
 */
246 #define BPF_RAND_SEXT_R7 \
247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
248 BPF_FUNC_get_prandom_u32), \
249 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
/* Force the sign bit, then LSH/ARSH by 32 sign-extends to 64 bits. */ \
250 BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
251 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
252 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
/* The test table: each element is one verifier test case, pulled in from
 * the generated per-test fragments under verifier/.
 * NOTE(review): the closing "};" is missing from this excerpt.
 */
254 static struct bpf_test tests[] = {
256 #include <verifier/tests.h>
/* Determine the effective program length by scanning backward for the last
 * non-zero instruction in the fixed-size insns[] array.
 * NOTE(review): the loop body's break/return and the final return are
 * missing from this excerpt.
 */
260 static int probe_filter_length(const struct bpf_insn *fp)
264 for (len = MAX_INSNS - 1; len > 0; --len)
265 if (fp[len].code != 0 || fp[len].imm != 0)
/* Return true (and print SKIP) when the kernel does not support the given
 * map type, so callers can skip the test instead of failing.
 * NOTE(review): the skip bookkeeping and return statements are missing
 * from this excerpt.
 */
270 static bool skip_unsupported_map(enum bpf_map_type map_type)
272 if (!bpf_probe_map_type(map_type, 0)) {
273 printf("SKIP (unsupported map type %d)\n", map_type);
/* Create a single-purpose test map; hash maps get BPF_F_NO_PREALLOC.
 * Returns the map fd (error handling/return lines are missing from this
 * excerpt).
 */
280 static int create_map(uint32_t type, uint32_t size_key,
281 uint32_t size_value, uint32_t max_elem)
285 fd = bpf_create_map(type, size_key, size_value, max_elem,
286 type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
/* On failure: skip silently for unsupported types, else report errno. */
288 if (skip_unsupported_map(type))
290 printf("Failed to create hash map '%s'!\n", strerror(errno));
/* Seed element @index of an array/hash map with a known test_val pattern
 * (index pointing at foo[7], magic marker in foo[6]); asserts on failure.
 */
296 static void update_map(int fd, int index)
298 struct test_val value = {
299 .index = (6 + 1) * sizeof(int),
300 .foo[6] = 0xabcdef12,
303 assert(!bpf_map_update_elem(fd, &index, &value, 0));
/* Load a trivial program (return 42) used as a prog-array target.
 * NOTE(review): the BPF_EXIT_INSN() element of prog[] appears to be
 * missing from this excerpt.
 */
306 static int create_prog_dummy1(enum bpf_prog_type prog_type)
308 struct bpf_insn prog[] = {
309 BPF_MOV64_IMM(BPF_REG_0, 42),
313 return bpf_load_program(prog_type, prog,
314 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Load a program that tail-calls slot @idx of prog array @mfd and returns
 * 41 if the tail call falls through.
 * NOTE(review): the BPF_FUNC_tail_call argument of the RAW_INSN and the
 * trailing EXIT appear to be missing from this excerpt.
 */
317 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
319 struct bpf_insn prog[] = {
320 BPF_MOV64_IMM(BPF_REG_3, idx),
321 BPF_LD_MAP_FD(BPF_REG_2, mfd),
322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
324 BPF_MOV64_IMM(BPF_REG_0, 41),
328 return bpf_load_program(prog_type, prog,
329 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
/* Build a BPF_MAP_TYPE_PROG_ARRAY with @max_elem slots and populate two
 * slots with the dummy programs above.  Returns the map fd.
 * NOTE(review): the p1key/p2key declarations, error cleanup paths and the
 * final return are missing from this excerpt.
 */
332 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
338 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
339 sizeof(int), max_elem, 0);
341 if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
343 printf("Failed to create prog array '%s'!\n", strerror(errno));
347 p1fd = create_prog_dummy1(prog_type);
348 p2fd = create_prog_dummy2(prog_type, mfd, p2key);
349 if (p1fd < 0 || p2fd < 0)
351 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
353 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
/* Create an ARRAY_OF_MAPS with one slot, using a 1-element int array as
 * the inner-map template.  Returns the outer map fd.
 * NOTE(review): the inner-map close and return statements are missing
 * from this excerpt.
 */
366 static int create_map_in_map(void)
368 int inner_map_fd, outer_map_fd;
370 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
372 if (inner_map_fd < 0) {
373 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
375 printf("Failed to create array '%s'!\n", strerror(errno));
379 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
380 sizeof(int), inner_map_fd, 1, 0);
381 if (outer_map_fd < 0) {
382 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
384 printf("Failed to create array of maps '%s'!\n",
/* Create a (per-CPU) cgroup storage map keyed by bpf_cgroup_storage_key.
 * Returns the map fd (error/return lines are missing from this excerpt).
 */
393 static int create_cgroup_storage(bool percpu)
395 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
396 BPF_MAP_TYPE_CGROUP_STORAGE;
399 fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
400 TEST_DATA_LEN, 0, 0);
402 if (skip_unsupported_map(type))
404 printf("Failed to create cgroup storage '%s'!\n",
/* Helpers to hand-encode raw BTF type records (mirrors the layout in
 * linux/btf.h): info word packs kind_flag(31) | kind(24..30) | vlen(0..15).
 */
411 #define BTF_INFO_ENC(kind, kind_flag, vlen) \
412 ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
413 #define BTF_TYPE_ENC(name, info, size_or_type) \
414 (name), (info), (size_or_type)
415 #define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
416 ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
417 #define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
418 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
419 BTF_INT_ENC(encoding, bits_offset, bits)
420 #define BTF_MEMBER_ENC(name, type, bits_offset) \
421 (name), (type), (bits_offset)
/* Raw BTF blob describing "struct val { int cnt; struct bpf_spin_lock l; }"
 * for the spin-lock map tests.  String offsets (1, 15, 19, 23) index into
 * btf_str_sec below.
 * NOTE(review): the btf_raw_data members, the rest of this explanatory
 * comment, and the closing "};" of btf_raw_types are missing from this
 * excerpt.
 */
423 struct btf_raw_data {
429 /* struct bpf_spin_lock {
434 * struct bpf_spin_lock l;
 */
437 static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
438 static __u32 btf_raw_types[] = {
440 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
441 /* struct bpf_spin_lock */ /* [2] */
442 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
443 BTF_MEMBER_ENC(15, 1, 0), /* int val; */
444 /* struct val */ /* [3] */
445 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
446 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
447 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
/* Assemble header + type section + string section into one contiguous
 * buffer and load it via bpf_load_btf(); returns the BTF fd.
 * NOTE(review): the ptr/raw_btf/btf_fd declarations, the malloc NULL
 * check, the ptr advances between memcpys, free(), and the return are
 * missing from this excerpt.
 */
450 static int load_btf(void)
452 struct btf_header hdr = {
454 .version = BTF_VERSION,
455 .hdr_len = sizeof(struct btf_header),
456 .type_len = sizeof(btf_raw_types),
/* String section starts immediately after the type section. */
457 .str_off = sizeof(btf_raw_types),
458 .str_len = sizeof(btf_str_sec),
463 ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
464 sizeof(btf_str_sec));
466 memcpy(ptr, &hdr, sizeof(hdr));
468 memcpy(ptr, btf_raw_types, hdr.type_len);
470 memcpy(ptr, btf_str_sec, hdr.str_len);
473 btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
/* Create a 1-element array map whose value type (BTF id 3, "struct val")
 * embeds a bpf_spin_lock; requires load_btf() to succeed first.
 * NOTE(review): the remaining attr initializers, the load_btf() call and
 * the return statement are missing from this excerpt.
 */
480 static int create_map_spin_lock(void)
482 struct bpf_create_map_attr attr = {
484 .map_type = BPF_MAP_TYPE_ARRAY,
488 .btf_key_type_id = 1,
489 .btf_value_type_id = 3,
496 attr.btf_fd = btf_fd;
497 fd = bpf_create_map_xattr(&attr);
499 printf("Failed to create map with spin_lock\n");
/* Shared verifier log buffer (UINT_MAX >> 8 = 16 MiB); filled by
 * bpf_verify_program() and searched for expected error strings.
 */
503 static char bpf_vlog[UINT_MAX >> 8];
/* Resolve all per-test fixups before loading: run the optional fill_helper,
 * then for every non-empty fixup list create the matching map (one fd per
 * map slot) and patch each listed instruction's .imm with that fd.
 * NOTE(review): this excerpt is line-sampled — every "do {" opener of the
 * do/while fixup loops, several pointer increments, and the function's
 * braces are missing.
 */
505 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
506 struct bpf_insn *prog, int *map_fds)
508 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
509 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
510 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
511 int *fixup_map_array_48b = test->fixup_map_array_48b;
512 int *fixup_map_sockmap = test->fixup_map_sockmap;
513 int *fixup_map_sockhash = test->fixup_map_sockhash;
514 int *fixup_map_xskmap = test->fixup_map_xskmap;
515 int *fixup_map_stacktrace = test->fixup_map_stacktrace;
516 int *fixup_prog1 = test->fixup_prog1;
517 int *fixup_prog2 = test->fixup_prog2;
518 int *fixup_map_in_map = test->fixup_map_in_map;
519 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
520 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
521 int *fixup_map_spin_lock = test->fixup_map_spin_lock;
/* Let the test synthesize its instruction stream first, if it wants to. */
523 if (test->fill_helper)
524 test->fill_helper(test);
526 /* Allocating HTs with 1 elem is fine here, since we only test
527 * for verifier and not do a runtime lookup, so the only thing
528 * that really matters is value size in this case.
 */
530 if (*fixup_map_hash_8b) {
531 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
532 sizeof(long long), 1);
534 prog[*fixup_map_hash_8b].imm = map_fds[0];
536 } while (*fixup_map_hash_8b);
539 if (*fixup_map_hash_48b) {
540 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
541 sizeof(struct test_val), 1);
543 prog[*fixup_map_hash_48b].imm = map_fds[1];
544 fixup_map_hash_48b++;
545 } while (*fixup_map_hash_48b);
548 if (*fixup_map_hash_16b) {
549 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
550 sizeof(struct other_val), 1);
552 prog[*fixup_map_hash_16b].imm = map_fds[2];
553 fixup_map_hash_16b++;
554 } while (*fixup_map_hash_16b);
/* Array map gets pre-seeded so lookups see known data. */
557 if (*fixup_map_array_48b) {
558 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
559 sizeof(struct test_val), 1);
560 update_map(map_fds[3], 0);
562 prog[*fixup_map_array_48b].imm = map_fds[3];
563 fixup_map_array_48b++;
564 } while (*fixup_map_array_48b);
568 map_fds[4] = create_prog_array(prog_type, 4, 0);
570 prog[*fixup_prog1].imm = map_fds[4];
572 } while (*fixup_prog1);
576 map_fds[5] = create_prog_array(prog_type, 8, 7);
578 prog[*fixup_prog2].imm = map_fds[5];
580 } while (*fixup_prog2);
583 if (*fixup_map_in_map) {
584 map_fds[6] = create_map_in_map();
586 prog[*fixup_map_in_map].imm = map_fds[6];
588 } while (*fixup_map_in_map);
591 if (*fixup_cgroup_storage) {
592 map_fds[7] = create_cgroup_storage(false);
594 prog[*fixup_cgroup_storage].imm = map_fds[7];
595 fixup_cgroup_storage++;
596 } while (*fixup_cgroup_storage);
599 if (*fixup_percpu_cgroup_storage) {
600 map_fds[8] = create_cgroup_storage(true);
602 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
603 fixup_percpu_cgroup_storage++;
604 } while (*fixup_percpu_cgroup_storage);
606 if (*fixup_map_sockmap) {
607 map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
610 prog[*fixup_map_sockmap].imm = map_fds[9];
612 } while (*fixup_map_sockmap);
614 if (*fixup_map_sockhash) {
615 map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
618 prog[*fixup_map_sockhash].imm = map_fds[10];
619 fixup_map_sockhash++;
620 } while (*fixup_map_sockhash);
622 if (*fixup_map_xskmap) {
623 map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
626 prog[*fixup_map_xskmap].imm = map_fds[11];
628 } while (*fixup_map_xskmap);
630 if (*fixup_map_stacktrace) {
631 map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
634 prog[*fixup_map_stacktrace].imm = map_fds[12];
635 fixup_map_stacktrace++;
636 } while (*fixup_map_stacktrace);
638 if (*fixup_map_spin_lock) {
639 map_fds[13] = create_map_spin_lock();
641 prog[*fixup_map_spin_lock].imm = map_fds[13];
642 fixup_map_spin_lock++;
643 } while (*fixup_map_spin_lock);
/* Raise or drop CAP_SYS_ADMIN in the effective capability set, so the same
 * process can run tests both privileged and unprivileged.
 * NOTE(review): the caps declaration, cap_free() cleanup and return
 * statements are missing from this excerpt.
 */
647 static int set_admin(bool admin)
650 const cap_value_t cap_val = CAP_SYS_ADMIN;
653 caps = cap_get_proc();
655 perror("cap_get_proc");
658 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
659 admin ? CAP_SET : CAP_CLEAR)) {
660 perror("cap_set_flag");
663 if (cap_set_proc(caps)) {
664 perror("cap_set_proc");
/* Execute the loaded program once via bpf_prog_test_run() and compare its
 * return value against @expected_val (POINTER_VALUE means "don't check").
 * NOTE(review): the retval declaration, the set_admin() toggling around the
 * run for the unpriv case, and the return statements are missing from this
 * excerpt.
 */
674 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
675 void *data, size_t size_data)
677 __u8 tmp[TEST_DATA_LEN << 2];
678 __u32 size_tmp = sizeof(tmp);
684 err = bpf_prog_test_run(fd_prog, 1, data, size_data,
685 tmp, &size_tmp, &retval, NULL);
/* ENOTSUPP (524) and EPERM are tolerated: prog types without test_run
 * support, or an unprivileged run.
 */
688 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
689 printf("Unexpected bpf_prog_test_run error ");
692 if (!err && retval != expected_val &&
693 expected_val != POINTER_VALUE) {
694 printf("FAIL retval %d != %d ", retval, expected_val);
/* Run one test case in one privilege mode: apply fixups, load the program
 * with the flag-selected alignment options, check load verdict and
 * verifier-log expectations, optionally execute the program and compare
 * return values, then clean up.
 * NOTE(review): this excerpt is line-sampled — fixup_skips bookkeeping,
 * several if/else bodies, goto labels for fail_log/close_fds, map fd
 * closing, and the pass/error accounting are missing.
 */
701 static void do_test_single(struct bpf_test *test, bool unpriv,
702 int *passes, int *errors)
704 int fd_prog, expected_ret, alignment_prevented_execution;
705 int prog_len, prog_type = test->prog_type;
706 struct bpf_insn *prog = test->insns;
707 int run_errs, run_successes;
708 int map_fds[MAX_NR_MAPS];
709 const char *expected_err;
714 for (i = 0; i < MAX_NR_MAPS; i++)
/* Tests with no explicit prog_type default to socket filter. */
718 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
720 do_test_fixup(test, prog_type, prog, map_fds);
721 /* If there were some map skips during fixup due to missing bpf
722 * features, skip this test.
 */
724 if (fixup_skips != skips)
726 prog_len = probe_filter_length(prog);
729 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
730 pflags |= BPF_F_STRICT_ALIGNMENT;
731 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
732 pflags |= BPF_F_ANY_ALIGNMENT;
733 fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
734 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
735 if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
736 printf("SKIP (unsupported program type %d)\n", prog_type);
/* Unpriv expectations fall back to the privileged ones when unset. */
741 expected_ret = unpriv && test->result_unpriv != UNDEF ?
742 test->result_unpriv : test->result;
743 expected_err = unpriv && test->errstr_unpriv ?
744 test->errstr_unpriv : test->errstr;
746 alignment_prevented_execution = 0;
748 if (expected_ret == ACCEPT) {
750 printf("FAIL\nFailed to load prog '%s'!\n",
/* On hosts without efficient unaligned access, accepted programs that
 * need it load but are not executed.
 */
754 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
756 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
757 alignment_prevented_execution = 1;
761 printf("FAIL\nUnexpected success to load!\n");
764 if (!strstr(bpf_vlog, expected_err)) {
765 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
766 expected_err, bpf_vlog);
/* Optionally check the verifier's "processed N insns" count. */
771 if (test->insn_processed) {
772 uint32_t insn_processed;
775 proc = strstr(bpf_vlog, "processed ");
776 insn_processed = atoi(proc + 10);
777 if (test->insn_processed != insn_processed) {
778 printf("FAIL\nUnexpected insn_processed %u vs %u\n",
779 insn_processed, test->insn_processed);
786 if (!alignment_prevented_execution && fd_prog >= 0) {
787 uint32_t expected_val;
791 expected_val = unpriv && test->retval_unpriv ?
792 test->retval_unpriv : test->retval;
794 err = do_prog_test_run(fd_prog, unpriv, expected_val,
795 test->data, sizeof(test->data));
/* Multi-run tests: one execution per retvals[] entry. */
802 for (i = 0; i < test->runs; i++) {
803 if (unpriv && test->retvals[i].retval_unpriv)
804 expected_val = test->retvals[i].retval_unpriv;
806 expected_val = test->retvals[i].retval;
808 err = do_prog_test_run(fd_prog, unpriv, expected_val,
809 test->retvals[i].data,
810 sizeof(test->retvals[i].data));
812 printf("(run %d/%d) ", i + 1, test->runs);
822 if (run_successes > 1)
823 printf("%d cases ", run_successes);
825 if (alignment_prevented_execution)
826 printf(" (NOTE: not executed due to unknown alignment)");
834 for (i = 0; i < MAX_NR_MAPS; i++)
840 printf("%s", bpf_vlog);
/* Report whether CAP_SYS_ADMIN is currently in the effective set.
 * NOTE(review): the caps declaration, some return statements and
 * cap_free() cleanup are missing from this excerpt.
 */
844 static bool is_admin(void)
847 cap_flag_value_t sysadmin = CAP_CLEAR;
848 const cap_value_t cap_val = CAP_SYS_ADMIN;
/* Sanity-probe libcap support first where the macro exists. */
850 #ifdef CAP_IS_SUPPORTED
851 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
852 perror("cap_get_flag");
856 caps = cap_get_proc();
858 perror("cap_get_proc");
861 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
862 perror("cap_get_flag");
865 return (sysadmin == CAP_SET);
/* Read /proc/sys/kernel/unprivileged_bpf_disabled and set the global
 * unpriv_disabled flag; treat an unreadable sysctl as "disabled".
 * NOTE(review): the buf/fd declarations, the return after the fopen
 * failure, and fclose() are missing from this excerpt.
 */
868 static void get_unpriv_disabled()
873 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
875 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
876 unpriv_disabled = true;
879 if (fgets(buf, 2, fd) == buf && atoi(buf))
880 unpriv_disabled = true;
884 static bool test_as_unpriv(struct bpf_test *test)
886 return !test->prog_type ||
887 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
888 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
/* Run tests[from..to): each eligible test once unprivileged (/u, with
 * CAP_SYS_ADMIN dropped via set_admin) and once privileged (/p), then
 * print the summary.  Returns EXIT_FAILURE iff any test failed.
 * NOTE(review): the set_admin(false)/set_admin(true) toggles, the skips
 * accounting and several braces are missing from this excerpt.
 */
891 static int do_test(bool unpriv, unsigned int from, unsigned int to)
893 int i, passes = 0, errors = 0;
895 for (i = from; i < to; i++) {
896 struct bpf_test *test = &tests[i];
898 /* Program types that are not supported by non-root we
901 if (test_as_unpriv(test) && unpriv_disabled) {
902 printf("#%d/u %s SKIP\n", i, test->descr);
904 } else if (test_as_unpriv(test)) {
907 printf("#%d/u %s ", i, test->descr);
908 do_test_single(test, true, &passes, &errors);
914 printf("#%d/p %s SKIP\n", i, test->descr);
917 printf("#%d/p %s ", i, test->descr);
918 do_test_single(test, false, &passes, &errors);
922 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
924 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
/* Entry point.  Optional argv: "<from> <to>" selects a test index range,
 * a single "<n>" selects one test; otherwise all tests run.  Refuses to
 * run unprivileged when the sysctl disables unprivileged bpf().
 * NOTE(review): the argc==3 check, the from/to assignments inside the
 * range branches, and the single-test branch body are missing from this
 * excerpt.
 */
927 int main(int argc, char **argv)
929 unsigned int from = 0, to = ARRAY_SIZE(tests);
930 bool unpriv = !is_admin();
933 unsigned int l = atoi(argv[argc - 2]);
934 unsigned int u = atoi(argv[argc - 1]);
936 if (l < to && u < to) {
940 } else if (argc == 2) {
941 unsigned int t = atoi(argv[argc - 1]);
949 get_unpriv_disabled();
950 if (unpriv && unpriv_disabled) {
951 printf("Cannot run as unprivileged user with sysctl %s.\n",
956 bpf_semi_rand_init();
957 return do_test(unpriv, from, to);