/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS       BPF_MAXINSNS
#define MAX_TEST_INSNS  1000000
#define MAX_FIXUPS      8
#define MAX_NR_MAPS     14
#define MAX_TEST_RUNS   8
#define POINTER_VALUE   0xcafe4all
#define TEST_DATA_LEN   64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS      (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT            (1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;

struct bpf_test {
        const char *descr;
        struct bpf_insn insns[MAX_INSNS];
        struct bpf_insn *fill_insns;
        int fixup_map_hash_8b[MAX_FIXUPS];
        int fixup_map_hash_48b[MAX_FIXUPS];
        int fixup_map_hash_16b[MAX_FIXUPS];
        int fixup_map_array_48b[MAX_FIXUPS];
        int fixup_map_sockmap[MAX_FIXUPS];
        int fixup_map_sockhash[MAX_FIXUPS];
        int fixup_map_xskmap[MAX_FIXUPS];
        int fixup_map_stacktrace[MAX_FIXUPS];
        int fixup_prog1[MAX_FIXUPS];
        int fixup_prog2[MAX_FIXUPS];
        int fixup_map_in_map[MAX_FIXUPS];
        int fixup_cgroup_storage[MAX_FIXUPS];
        int fixup_percpu_cgroup_storage[MAX_FIXUPS];
        int fixup_map_spin_lock[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t retval, retval_unpriv, insn_processed;
        int prog_len;
        enum {
                UNDEF,
                ACCEPT,
                REJECT
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
        uint8_t flags;
        __u8 data[TEST_DATA_LEN];
        void (*fill_helper)(struct bpf_test *self);
        uint8_t runs;
        struct {
                uint32_t retval, retval_unpriv;
                union {
                        __u8 data[TEST_DATA_LEN];
                        __u64 data64[TEST_DATA_LEN / 8];
                };
        } retvals[MAX_TEST_RUNS];
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
        unsigned int index;
        int foo[MAX_ENTRIES];
};

struct other_val {
        long long foo;
        long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
        /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
        /* jump range is limited to 16 bits. PUSH_CNT of ld_abs needs room */
        unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, j, k = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_push);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
                i++;
        }

        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_pop);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
                i++;
        }
        if (++k < 5)
                goto loop;

        for (; i < len - 1; i++)
                insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
        insn[len - 1] = BPF_EXIT_INSN();
        self->prog_len = len;
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        /* jump range is limited to 16 bits. Every ld_abs is replaced by 6 insns */
        unsigned int len = (1 << 15) / 6;
        int i = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        insn[i++] = BPF_LD_ABS(BPF_B, 0);
        insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
        i++;
        while (i < len - 1)
                insn[i++] = BPF_LD_ABS(BPF_B, 1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
}

static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        uint64_t res = 0;
        int i = 0;

        insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
        while (i < self->retval) {
                uint64_t val = bpf_semi_rand_get();
                struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

                res ^= val;
                insn[i++] = tmp[0];
                insn[i++] = tmp[1];
                insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        }
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
        insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
        insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        res ^= (res >> 32);
        self->retval = (uint32_t)res;
}

/* BPF_SK_LOOKUP expands to 13 instructions; account for that when fixing
 * up map references by instruction offset.
 */
#define BPF_SK_LOOKUP(func)                                             \
        /* struct bpf_sock_tuple tuple = {} */                          \
        BPF_MOV64_IMM(BPF_REG_2, 0),                                    \
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),                  \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),                \
        /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */                \
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),                           \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),                         \
        BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),        \
        BPF_MOV64_IMM(BPF_REG_4, 0),                                    \
        BPF_MOV64_IMM(BPF_REG_5, 0),                                    \
        BPF_EMIT_CALL(BPF_FUNC_ ## func)

/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2                                               \
        BPF_MOV64_IMM(BPF_REG_0, 0),                                    \
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data)),                  \
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data_end)),              \
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),                            \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),                           \
        BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),                  \
        BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),                          \
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),                   \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),                          \
        BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

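/* Find the length of a statically defined test program by scanning
 * backwards for the last non-zero instruction in insns[].
 */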
static int probe_filter_length(const struct bpf_insn *fp)
{
        int len;

        for (len = MAX_INSNS - 1; len > 0; --len)
                if (fp[len].code != 0 || fp[len].imm != 0)
                        break;
        return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
        if (!bpf_probe_map_type(map_type, 0)) {
                printf("SKIP (unsupported map type %d)\n", map_type);
                skips++;
                return true;
        }
        return false;
}

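/* Create a single map of the given type; hash maps are created with
 * BPF_F_NO_PREALLOC. Returns the map fd, or a negative value on error
 * (after bumping the skip counter if the map type is unsupported).
 */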
static int create_map(uint32_t type, uint32_t size_key,
                      uint32_t size_value, uint32_t max_elem)
{
        int fd;

        fd = bpf_create_map(type, size_key, size_value, max_elem,
                            type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create hash map '%s'!\n", strerror(errno));
        }

        return fd;
}

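/* Store a fixed test_val pattern at element @index of the given map. */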
static void update_map(int fd, int index)
{
        struct test_val value = {
                .index = (6 + 1) * sizeof(int),
                .foo[6] = 0xabcdef12,
        };

        assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_0, 42),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_3, idx),
                BPF_LD_MAP_FD(BPF_REG_2, mfd),
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_tail_call),
                BPF_MOV64_IMM(BPF_REG_0, 41),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

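/* Build a prog array for tail-call tests: slot p1key holds a dummy program
 * returning 42, slot 1 holds a dummy program that tail-calls back into the
 * same array and returns 41 if the tail call fails.
 */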
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                             int p1key)
{
        int p2key = 1;
        int mfd, p1fd, p2fd;

        mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
                             sizeof(int), max_elem, 0);
        if (mfd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
                        return -1;
                printf("Failed to create prog array '%s'!\n", strerror(errno));
                return -1;
        }

        p1fd = create_prog_dummy1(prog_type);
        p2fd = create_prog_dummy2(prog_type, mfd, p2key);
        if (p1fd < 0 || p2fd < 0)
                goto out;
        if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
                goto out;
        if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
                goto out;
        close(p2fd);
        close(p1fd);

        return mfd;
out:
        close(p2fd);
        close(p1fd);
        close(mfd);
        return -1;
}

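/* Create a one-element ARRAY_OF_MAPS whose inner map is a plain one-element
 * array; only the outer map fd is returned.
 */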
static int create_map_in_map(void)
{
        int inner_map_fd, outer_map_fd;

        inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                      sizeof(int), 1, 0);
        if (inner_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
                        return -1;
                printf("Failed to create array '%s'!\n", strerror(errno));
                return inner_map_fd;
        }

        outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
                                             sizeof(int), inner_map_fd, 1, 0);
        if (outer_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
                        return -1;
                printf("Failed to create array of maps '%s'!\n",
                       strerror(errno));
        }

        close(inner_map_fd);

        return outer_map_fd;
}

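/* Create a (per-cpu) cgroup storage map keyed by bpf_cgroup_storage_key
 * with a TEST_DATA_LEN sized value.
 */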
static int create_cgroup_storage(bool percpu)
{
        enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
                BPF_MAP_TYPE_CGROUP_STORAGE;
        int fd;

        fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
                            TEST_DATA_LEN, 0, 0);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create cgroup storage '%s'!\n",
                       strerror(errno));
        }

        return fd;
}

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
        ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) \
        (name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
        ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
        BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
        BTF_INT_ENC(encoding, bits_offset, bits)
#define BTF_MEMBER_ENC(name, type, bits_offset) \
        (name), (type), (bits_offset)

struct btf_raw_data {
        __u32 raw_types[64];
        const char *str_sec;
        __u32 str_sec_size;
};

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
static __u32 btf_raw_types[] = {
        /* int */
        BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
        /* struct bpf_spin_lock */                      /* [2] */
        BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
        BTF_MEMBER_ENC(15, 1, 0), /* int val; */
        /* struct val */                                /* [3] */
        BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
        BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
        BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};

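/* Load the raw BTF blob above (int, struct bpf_spin_lock, struct val) and
 * return the resulting BTF fd, or -1 on failure.
 */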
static int load_btf(void)
{
        struct btf_header hdr = {
                .magic = BTF_MAGIC,
                .version = BTF_VERSION,
                .hdr_len = sizeof(struct btf_header),
                .type_len = sizeof(btf_raw_types),
                .str_off = sizeof(btf_raw_types),
                .str_len = sizeof(btf_str_sec),
        };
        void *ptr, *raw_btf;
        int btf_fd;

        ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
                               sizeof(btf_str_sec));

        memcpy(ptr, &hdr, sizeof(hdr));
        ptr += sizeof(hdr);
        memcpy(ptr, btf_raw_types, hdr.type_len);
        ptr += hdr.type_len;
        memcpy(ptr, btf_str_sec, hdr.str_len);
        ptr += hdr.str_len;

        btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
        free(raw_btf);
        if (btf_fd < 0)
                return -1;
        return btf_fd;
}

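/* Create a one-element array map whose value is struct val from the BTF
 * above, so the verifier can recognize the embedded bpf_spin_lock.
 */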
static int create_map_spin_lock(void)
{
        struct bpf_create_map_attr attr = {
                .name = "test_map",
                .map_type = BPF_MAP_TYPE_ARRAY,
                .key_size = 4,
                .value_size = 8,
                .max_entries = 1,
                .btf_key_type_id = 1,
                .btf_value_type_id = 3,
        };
        int fd, btf_fd;

        btf_fd = load_btf();
        if (btf_fd < 0)
                return -1;
        attr.btf_fd = btf_fd;
        fd = bpf_create_map_xattr(&attr);
        if (fd < 0)
                printf("Failed to create map with spin_lock\n");
        return fd;
}

static char bpf_vlog[UINT_MAX >> 8];

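/* Patch the test program: run its fill_helper (if any) and replace the
 * placeholder immediates listed in the fixup_* arrays with fds of freshly
 * created maps.
 */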
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                          struct bpf_insn *prog, int *map_fds)
{
        int *fixup_map_hash_8b = test->fixup_map_hash_8b;
        int *fixup_map_hash_48b = test->fixup_map_hash_48b;
        int *fixup_map_hash_16b = test->fixup_map_hash_16b;
        int *fixup_map_array_48b = test->fixup_map_array_48b;
        int *fixup_map_sockmap = test->fixup_map_sockmap;
        int *fixup_map_sockhash = test->fixup_map_sockhash;
        int *fixup_map_xskmap = test->fixup_map_xskmap;
        int *fixup_map_stacktrace = test->fixup_map_stacktrace;
        int *fixup_prog1 = test->fixup_prog1;
        int *fixup_prog2 = test->fixup_prog2;
        int *fixup_map_in_map = test->fixup_map_in_map;
        int *fixup_cgroup_storage = test->fixup_cgroup_storage;
        int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
        int *fixup_map_spin_lock = test->fixup_map_spin_lock;

        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
                test->fill_helper(test);
        }

        /* Allocating hash tables with 1 element is fine here, since we only
         * exercise the verifier and never do a runtime lookup, so the only
         * thing that really matters in this case is the value size.
         */
        if (*fixup_map_hash_8b) {
                map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(long long), 1);
                do {
                        prog[*fixup_map_hash_8b].imm = map_fds[0];
                        fixup_map_hash_8b++;
                } while (*fixup_map_hash_8b);
        }

        if (*fixup_map_hash_48b) {
                map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct test_val), 1);
                do {
                        prog[*fixup_map_hash_48b].imm = map_fds[1];
                        fixup_map_hash_48b++;
                } while (*fixup_map_hash_48b);
        }

        if (*fixup_map_hash_16b) {
                map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct other_val), 1);
                do {
                        prog[*fixup_map_hash_16b].imm = map_fds[2];
                        fixup_map_hash_16b++;
                } while (*fixup_map_hash_16b);
        }

        if (*fixup_map_array_48b) {
                map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                        sizeof(struct test_val), 1);
                update_map(map_fds[3], 0);
                do {
                        prog[*fixup_map_array_48b].imm = map_fds[3];
                        fixup_map_array_48b++;
                } while (*fixup_map_array_48b);
        }

        if (*fixup_prog1) {
                map_fds[4] = create_prog_array(prog_type, 4, 0);
                do {
                        prog[*fixup_prog1].imm = map_fds[4];
                        fixup_prog1++;
                } while (*fixup_prog1);
        }

        if (*fixup_prog2) {
                map_fds[5] = create_prog_array(prog_type, 8, 7);
                do {
                        prog[*fixup_prog2].imm = map_fds[5];
                        fixup_prog2++;
                } while (*fixup_prog2);
        }

        if (*fixup_map_in_map) {
                map_fds[6] = create_map_in_map();
                do {
                        prog[*fixup_map_in_map].imm = map_fds[6];
                        fixup_map_in_map++;
                } while (*fixup_map_in_map);
        }

        if (*fixup_cgroup_storage) {
                map_fds[7] = create_cgroup_storage(false);
                do {
                        prog[*fixup_cgroup_storage].imm = map_fds[7];
                        fixup_cgroup_storage++;
                } while (*fixup_cgroup_storage);
        }

        if (*fixup_percpu_cgroup_storage) {
                map_fds[8] = create_cgroup_storage(true);
                do {
                        prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
                        fixup_percpu_cgroup_storage++;
                } while (*fixup_percpu_cgroup_storage);
        }
        if (*fixup_map_sockmap) {
                map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
                                        sizeof(int), 1);
                do {
                        prog[*fixup_map_sockmap].imm = map_fds[9];
                        fixup_map_sockmap++;
                } while (*fixup_map_sockmap);
        }
        if (*fixup_map_sockhash) {
                map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
                                        sizeof(int), 1);
                do {
                        prog[*fixup_map_sockhash].imm = map_fds[10];
                        fixup_map_sockhash++;
                } while (*fixup_map_sockhash);
        }
        if (*fixup_map_xskmap) {
                map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
                                        sizeof(int), 1);
                do {
                        prog[*fixup_map_xskmap].imm = map_fds[11];
                        fixup_map_xskmap++;
                } while (*fixup_map_xskmap);
        }
        if (*fixup_map_stacktrace) {
                map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
                                         sizeof(u64), 1);
                do {
                        prog[*fixup_map_stacktrace].imm = map_fds[12];
                        fixup_map_stacktrace++;
                } while (*fixup_map_stacktrace);
        }
        if (*fixup_map_spin_lock) {
                map_fds[13] = create_map_spin_lock();
                do {
                        prog[*fixup_map_spin_lock].imm = map_fds[13];
                        fixup_map_spin_lock++;
                } while (*fixup_map_spin_lock);
        }
}

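/* Toggle CAP_SYS_ADMIN in the effective capability set of the current
 * process.
 */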
static int set_admin(bool admin)
{
        cap_t caps;
        const cap_value_t cap_val = CAP_SYS_ADMIN;
        int ret = -1;

        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return -1;
        }
        if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
                                admin ? CAP_SET : CAP_CLEAR)) {
                perror("cap_set_flag");
                goto out;
        }
        if (cap_set_proc(caps)) {
                perror("cap_set_proc");
                goto out;
        }
        ret = 0;
out:
        if (cap_free(caps))
                perror("cap_free");
        return ret;
}

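/* Execute the loaded program once via bpf_prog_test_run() and compare its
 * return value against the expected one; temporarily re-acquires
 * CAP_SYS_ADMIN when called for an unprivileged test.
 */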
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
                            void *data, size_t size_data)
{
        __u8 tmp[TEST_DATA_LEN << 2];
        __u32 size_tmp = sizeof(tmp);
        uint32_t retval;
        int err;

        if (unpriv)
                set_admin(true);
        err = bpf_prog_test_run(fd_prog, 1, data, size_data,
                                tmp, &size_tmp, &retval, NULL);
        if (unpriv)
                set_admin(false);
        if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
                printf("Unexpected bpf_prog_test_run error ");
                return err;
        }
        if (!err && retval != expected_val &&
            expected_val != POINTER_VALUE) {
                printf("FAIL retval %d != %d ", retval, expected_val);
                return 1;
        }

        return 0;
}

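/* Run one verifier test case: apply fixups, load the program with the
 * requested flags, check the verifier verdict and log, and, when possible,
 * execute the program and check its return value(s).
 */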
static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
{
        int fd_prog, expected_ret, alignment_prevented_execution;
        int prog_len, prog_type = test->prog_type;
        struct bpf_insn *prog = test->insns;
        int run_errs, run_successes;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
        int fixup_skips;
        __u32 pflags;
        int i, err;

        for (i = 0; i < MAX_NR_MAPS; i++)
                map_fds[i] = -1;

        if (!prog_type)
                prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        fixup_skips = skips;
        do_test_fixup(test, prog_type, prog, map_fds);
        if (test->fill_insns) {
                prog = test->fill_insns;
                prog_len = test->prog_len;
        } else {
                prog_len = probe_filter_length(prog);
        }
        /* If there were some map skips during fixup due to missing bpf
         * features, skip this test.
         */
        if (fixup_skips != skips)
                return;

        pflags = 0;
        if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
                pflags |= BPF_F_STRICT_ALIGNMENT;
        if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
                pflags |= BPF_F_ANY_ALIGNMENT;
        fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
                                     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
        if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
                printf("SKIP (unsupported program type %d)\n", prog_type);
                skips++;
                goto close_fds;
        }

        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;
        expected_err = unpriv && test->errstr_unpriv ?
                       test->errstr_unpriv : test->errstr;

        alignment_prevented_execution = 0;

        if (expected_ret == ACCEPT) {
                if (fd_prog < 0) {
                        printf("FAIL\nFailed to load prog '%s'!\n",
                               strerror(errno));
                        goto fail_log;
                }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                if (fd_prog >= 0 &&
                    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
                        alignment_prevented_execution = 1;
#endif
        } else {
                if (fd_prog >= 0) {
                        printf("FAIL\nUnexpected success to load!\n");
                        goto fail_log;
                }
                if (!strstr(bpf_vlog, expected_err)) {
                        printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
                              expected_err, bpf_vlog);
                        goto fail_log;
                }
        }

        if (test->insn_processed) {
                uint32_t insn_processed;
                char *proc;

                proc = strstr(bpf_vlog, "processed ");
                insn_processed = atoi(proc + 10);
                if (test->insn_processed != insn_processed) {
                        printf("FAIL\nUnexpected insn_processed %u vs %u\n",
                               insn_processed, test->insn_processed);
                        goto fail_log;
                }
        }

        run_errs = 0;
        run_successes = 0;
        if (!alignment_prevented_execution && fd_prog >= 0) {
                uint32_t expected_val;
                int i;

                if (!test->runs) {
                        expected_val = unpriv && test->retval_unpriv ?
                                test->retval_unpriv : test->retval;

                        err = do_prog_test_run(fd_prog, unpriv, expected_val,
                                               test->data, sizeof(test->data));
                        if (err)
                                run_errs++;
                        else
                                run_successes++;
                }

                for (i = 0; i < test->runs; i++) {
                        if (unpriv && test->retvals[i].retval_unpriv)
                                expected_val = test->retvals[i].retval_unpriv;
                        else
                                expected_val = test->retvals[i].retval;

                        err = do_prog_test_run(fd_prog, unpriv, expected_val,
                                               test->retvals[i].data,
                                               sizeof(test->retvals[i].data));
                        if (err) {
                                printf("(run %d/%d) ", i + 1, test->runs);
                                run_errs++;
                        } else {
                                run_successes++;
                        }
                }
        }

        if (!run_errs) {
                (*passes)++;
                if (run_successes > 1)
                        printf("%d cases ", run_successes);
                printf("OK");
                if (alignment_prevented_execution)
                        printf(" (NOTE: not executed due to unknown alignment)");
                printf("\n");
        } else {
                printf("\n");
                goto fail_log;
        }
close_fds:
        if (test->fill_insns)
                free(test->fill_insns);
        close(fd_prog);
        for (i = 0; i < MAX_NR_MAPS; i++)
                close(map_fds[i]);
        sched_yield();
        return;
fail_log:
        (*errors)++;
        printf("%s", bpf_vlog);
        goto close_fds;
}

static bool is_admin(void)
{
        cap_t caps;
        cap_flag_value_t sysadmin = CAP_CLEAR;
        const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
        if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
                perror("cap_get_flag");
                return false;
        }
#endif
        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return false;
        }
        if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
                perror("cap_get_flag");
        if (cap_free(caps))
                perror("cap_free");
        return (sysadmin == CAP_SET);
}

static void get_unpriv_disabled()
{
        char buf[2];
        FILE *fd;

        fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
        if (!fd) {
                perror("fopen /proc/sys/"UNPRIV_SYSCTL);
                unpriv_disabled = true;
                return;
        }
        if (fgets(buf, 2, fd) == buf && atoi(buf))
                unpriv_disabled = true;
        fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
        return !test->prog_type ||
               test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
               test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

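/* Iterate over tests[from..to) and run each case unprivileged and/or
 * privileged, depending on capabilities and the unprivileged_bpf_disabled
 * sysctl.
 */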
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
        int i, passes = 0, errors = 0;

        for (i = from; i < to; i++) {
                struct bpf_test *test = &tests[i];

                /* Program types that are not supported by non-root are
                 * skipped right away.
                 */
                if (test_as_unpriv(test) && unpriv_disabled) {
                        printf("#%d/u %s SKIP\n", i, test->descr);
                        skips++;
                } else if (test_as_unpriv(test)) {
                        if (!unpriv)
                                set_admin(false);
                        printf("#%d/u %s ", i, test->descr);
                        do_test_single(test, true, &passes, &errors);
                        if (!unpriv)
                                set_admin(true);
                }

                if (unpriv) {
                        printf("#%d/p %s SKIP\n", i, test->descr);
                        skips++;
                } else {
                        printf("#%d/p %s ", i, test->descr);
                        do_test_single(test, false, &passes, &errors);
                }
        }

        printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
               skips, errors);
        return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
        unsigned int from = 0, to = ARRAY_SIZE(tests);
        bool unpriv = !is_admin();

        if (argc == 3) {
                unsigned int l = atoi(argv[argc - 2]);
                unsigned int u = atoi(argv[argc - 1]);

                if (l < to && u < to) {
                        from = l;
                        to   = u + 1;
                }
        } else if (argc == 2) {
                unsigned int t = atoi(argv[argc - 1]);

                if (t < to) {
                        from = t;
                        to   = t + 1;
                }
        }

        get_unpriv_disabled();
        if (unpriv && unpriv_disabled) {
                printf("Cannot run as unprivileged user with sysctl %s.\n",
                       UNPRIV_SYSCTL);
                return EXIT_FAILURE;
        }

        bpf_semi_rand_init();
        return do_test(unpriv, from, to);
}