asedeno.scripts.mit.edu Git - linux.git/blobdiff - tools/testing/selftests/bpf/test_verifier.c
bpf: allow BPF programs access skb_shared_info->gso_segs field
[linux.git] / tools / testing / selftests / bpf / test_verifier.c
index 33f7d38849b8279355bbcfc86c8af8bd23456db3..e4fef6ca8071b9febc3acbec193e0988c925643c 100644 (file)
@@ -23,6 +23,7 @@
 #include <stdbool.h>
 #include <sched.h>
 #include <limits.h>
+#include <assert.h>
 
 #include <sys/capability.h>
 
@@ -2577,6 +2578,7 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "invalid stack off=-79992 size=8",
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
        },
        {
                "PTR_TO_STACK store/load - out of bounds high",
@@ -3104,6 +3106,8 @@ static struct bpf_test tests[] = {
                        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
                        BPF_EXIT_INSN(),
                },
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+               .result_unpriv = REJECT,
                .result = ACCEPT,
        },
        {
@@ -3205,6 +3209,243 @@ static struct bpf_test tests[] = {
                /* Verifier rewrite for unpriv skips tail call here. */
                .retval_unpriv = 2,
        },
+       {
+               "PTR_TO_STACK check high 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "PTR_TO_STACK check high 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "PTR_TO_STACK check high 3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "PTR_TO_STACK check high 4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+               .errstr = "invalid stack off=0 size=1",
+               .result = REJECT,
+       },
+       {
+               "PTR_TO_STACK check high 5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off",
+       },
+       {
+               "PTR_TO_STACK check high 6",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off",
+       },
+       {
+               "PTR_TO_STACK check high 7",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+               .errstr = "fp pointer offset",
+       },
+       {
+               "PTR_TO_STACK check low 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "PTR_TO_STACK check low 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "PTR_TO_STACK check low 3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+               .errstr = "invalid stack off=-513 size=1",
+               .result = REJECT,
+       },
+       {
+               "PTR_TO_STACK check low 4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "math between fp pointer",
+       },
+       {
+               "PTR_TO_STACK check low 5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off",
+       },
+       {
+               "PTR_TO_STACK check low 6",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off",
+       },
+       {
+               "PTR_TO_STACK check low 7",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+               .errstr = "fp pointer offset",
+       },
+       {
+               "PTR_TO_STACK mixed reg/k, 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+                       BPF_MOV64_IMM(BPF_REG_2, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "PTR_TO_STACK mixed reg/k, 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+                       BPF_MOV64_IMM(BPF_REG_2, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "PTR_TO_STACK mixed reg/k, 3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+                       BPF_MOV64_IMM(BPF_REG_2, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = -3,
+       },
+       {
+               "PTR_TO_STACK reg",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_MOV64_IMM(BPF_REG_2, -3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "invalid stack off=0 size=1",
+               .result = ACCEPT,
+               .retval = 42,
+       },
        {
                "stack pointer arithmetic",
                .insns = {
@@ -5422,6 +5663,42 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
        },
+       {
+               "read gso_segs from CGROUP_SKB",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, gso_segs)),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+       },
+       {
+               "write gso_segs from CGROUP_SKB",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, gso_segs)),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .result_unpriv = REJECT,
+               .errstr = "invalid bpf_context access off=164 size=4",
+               .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+       },
+       {
+               "read gso_segs from CLS",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, gso_segs)),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "multiple registers share map_lookup_elem result",
                .insns = {
@@ -6610,11 +6887,16 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "map access: known scalar += value_ptr",
+               "map access: known scalar += value_ptr from different maps",
                .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
                        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                     BPF_FUNC_map_lookup_elem),
@@ -6625,88 +6907,652 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 1),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map_array_48b = { 3 },
+               .fixup_map_hash_16b = { 5 },
+               .fixup_map_array_48b = { 8 },
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R1 tried to add from different maps",
                .retval = 1,
        },
        {
-               "map access: value_ptr += known scalar",
+               "map access: value_ptr -= known scalar from different maps",
                .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
                        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                     BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
                        BPF_MOV64_IMM(BPF_REG_1, 4),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 1),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map_array_48b = { 3 },
+               .fixup_map_hash_16b = { 5 },
+               .fixup_map_array_48b = { 8 },
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 min value is outside of the array range",
                .retval = 1,
        },
        {
-               "map access: unknown scalar += value_ptr",
+               "map access: known scalar += value_ptr from different maps, but same value properties",
                .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
                        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                     BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
-                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 4),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 1),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map_array_48b = { 3 },
+               .fixup_map_hash_48b = { 5 },
+               .fixup_map_array_48b = { 8 },
                .result = ACCEPT,
                .retval = 1,
        },
        {
-               "map access: value_ptr += unknown scalar",
+               "map access: mixing value pointer and scalar, 1",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
-                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       // load map value pointer into r0 and r2
                        BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+                       BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       // load some number from the map into r1
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       // depending on r1, branch:
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+                       // branch A
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_JMP_A(2),
+                       // branch B
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+                       // common instruction
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       // depending on r1, branch:
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+                       // branch A
+                       BPF_JMP_A(4),
+                       // branch B
+                       BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+                       // verifier follows fall-through
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       // fake-dead code; targeted from branch A to
+                       // prevent dead code sanitization
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map_array_48b = { 3 },
+               .fixup_map_array_48b = { 1 },
                .result = ACCEPT,
-               .retval = 1,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+               .retval = 0,
        },
        {
-               "map access: value_ptr += value_ptr",
+               "map access: mixing value pointer and scalar, 2",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       // load map value pointer into r0 and r2
                        BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+                       BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
                        BPF_EXIT_INSN(),
-               },
-               .fixup_map_array_48b = { 3 },
+                       // load some number from the map into r1
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       // depending on r1, branch:
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+                       // branch A
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+                       BPF_JMP_A(2),
+                       // branch B
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       // common instruction
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       // depending on r1, branch:
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+                       // branch A
+                       BPF_JMP_A(4),
+                       // branch B
+                       BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+                       // verifier follows fall-through
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       // fake-dead code; targeted from branch A to
+                       // prevent dead code sanitization
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 1 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R2 tried to add from different maps or paths",
+               .retval = 0,
+       },
+       {
+               "sanitation: alu with different scalars",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+                       BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+                       BPF_JMP_A(2),
+                       BPF_MOV64_IMM(BPF_REG_2, 42),
+                       BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 1 },
+               .result = ACCEPT,
+               .retval = 0x100000,
+       },
+       {
+               "map access: value_ptr += known scalar, upper oob arith, test 1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_IMM(BPF_REG_1, 48),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += known scalar, upper oob arith, test 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_IMM(BPF_REG_1, 49),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += known scalar, upper oob arith, test 3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_IMM(BPF_REG_1, 47),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr -= known scalar, lower oob arith, test 1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_IMM(BPF_REG_1, 47),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_1, 48),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R0 min value is outside of the array range",
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+       },
+       {
+               "map access: value_ptr -= known scalar, lower oob arith, test 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_IMM(BPF_REG_1, 47),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 47 (last byte of 48b value) */
+                       BPF_MOV64_IMM(BPF_REG_1, 48),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), /* value_ptr -= 48: one byte below value start */
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 1: net offset 0, back in bounds */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT, /* privileged: final offset is in range, load is legal */
+               .result_unpriv = REJECT, /* unprivileged: transiently-OOB map pointer arithmetic is refused */
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr -= known scalar, lower oob arith, test 3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_IMM(BPF_REG_1, 47),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 47 */
+                       BPF_MOV64_IMM(BPF_REG_1, 47),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), /* value_ptr -= 47: never OOB, net offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT, /* unprivileged still refuses the register-based subtraction */
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+               .retval = 1,
+       },
+       {
+               "map access: known scalar += value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), /* scalar dst + ptr src must still yield a valid ptr */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), /* load from value + 4, in bounds */
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += known scalar, 1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 4, within the 48b value */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += known scalar, 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 49),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 49: one past the 48b value */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT, /* load beyond the value must be caught */
+               .errstr = "invalid access to map value",
+       },
+       {
+               "map access: value_ptr += known scalar, 3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, -1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += -1: one below value start */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT, /* load before the value must be caught */
+               .errstr = "invalid access to map value",
+       },
+       {
+               "map access: value_ptr += known scalar, 4",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_IMM(BPF_REG_1, 5),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 5 */
+                       BPF_MOV64_IMM(BPF_REG_1, -2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += -2 */
+                       BPF_MOV64_IMM(BPF_REG_1, -1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += -1: net +2, in bounds */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT, /* unprivileged refuses the negative register additions */
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += known scalar, 5",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)), /* offset 28, inside the 48b value */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), /* scalar dst + ptr src */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, 0), /* NOTE(review): expected retval implies the test harness pre-fills this word */
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 0xabcdef12,
+       },
+       {
+               "map access: value_ptr += known scalar, 6",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 16 */
+                       BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 12: total 28, in bounds */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 0xabcdef12,
+       },
+       {
+               "map access: unknown scalar += value_ptr, 1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), /* runtime-unknown byte from the map */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), /* bounded to [0, 15], so ptr stays in range */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: unknown scalar += value_ptr, 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), /* bounded to [0, 31]; 4-byte load still fits in 48b */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 0xabcdef12,
+       },
+       {
+               "map access: unknown scalar += value_ptr, 3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+                       BPF_MOV64_IMM(BPF_REG_1, -1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += -1: transiently below value */
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 1: net offset 0 again */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), /* bounded to [0, 31] */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT, /* unprivileged refuses the transient -1 step */
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+               .retval = 0xabcdef12,
+       },
+       {
+               "map access: unknown scalar += value_ptr, 4",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_1, 19),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* value_ptr += 19 */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), /* max total offset 19 + 31 = 50 > 47: OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R1 max value is outside of the array range",
+               .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
+       },
+       {
+               "map access: value_ptr += unknown scalar, 1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), /* bounded to [0, 15] */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), /* ptr dst + scalar src, in range */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += unknown scalar, 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), /* bounded to [0, 31] */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 0xabcdef12,
+       },
+       {
+               "map access: value_ptr += unknown scalar, 3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
+                       BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1), /* (x & 1) | 1 == 1: r3 is a known constant 1 */
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4), /* branch over the add on large r2 */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), /* value_ptr += 1, in range */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 2),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -3), /* rejoin at the exit above */
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0), /* ptr + ptr is never allowed */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
                .result = REJECT,
                .errstr = "R0 pointer += pointer prohibited",
        },
@@ -6770,6 +7616,8 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_array_48b = { 3 },
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
                .retval = 1,
        },
        {
@@ -6837,6 +7685,8 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_array_48b = { 3 },
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
                .retval = 1,
        },
        {
@@ -8376,6 +9226,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8400,6 +9251,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8426,6 +9278,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8451,6 +9304,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8499,6 +9353,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8570,6 +9425,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8621,6 +9477,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8648,6 +9505,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8674,6 +9532,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8703,6 +9562,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8733,6 +9593,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 4 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
        {
@@ -8761,6 +9622,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "unbounded min value",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
                .result_unpriv = REJECT,
        },
@@ -8813,8 +9675,38 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
                .result = REJECT,
        },
+       {
+               "check subtraction on pointers for unpriv",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+                       BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0), /* stack ptr - map value ptr: leaks addresses */
+                       BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+                       BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), /* write the difference into map value */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_hash_8b = { 1, 9 },
+               .result = ACCEPT, /* privileged may compute pointer differences */
+               .result_unpriv = REJECT, /* unprivileged must not, to avoid address leaks */
+               .errstr_unpriv = "R9 pointer -= pointer prohibited",
+       },
        {
                "bounds check based on zero-extended MOV",
                .insns = {
@@ -9145,6 +10037,36 @@ static struct bpf_test tests[] = {
                .errstr = "R0 unbounded memory access",
                .result = REJECT
        },
+       {
+               "bounds check after 32-bit right shift with 64-bit input",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       /* r1 = 2 */
+                       BPF_MOV64_IMM(BPF_REG_1, 2),
+                       /* r1 = 1<<32 */
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
+                       /* r1 = 0 (NOT 2!) */
+                       BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31), /* 32-bit op discards the high bit first */
+                       /* r1 = 0xffff'fffe (NOT 0!) */
+                       BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2), /* 32-bit underflow wraps */
+                       /* computes OOB pointer */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_hash_8b = { 3 },
+               .errstr = "R0 invalid mem access", /* verifier must not track r1 as a small constant here */
+               .result = REJECT,
+       },
        {
                "bounds check map access with off+size signed 32bit overflow. test1",
                .insns = {
@@ -9185,6 +10107,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "pointer offset 1073741822",
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
                .result = REJECT
        },
        {
@@ -9206,6 +10129,7 @@ static struct bpf_test tests[] = {
                },
                .fixup_map_hash_8b = { 3 },
                .errstr = "pointer offset -1073741822",
+               .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
                .result = REJECT
        },
        {
@@ -9377,6 +10301,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN()
                },
                .errstr = "fp pointer offset 1073741822",
+               .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
                .result = REJECT
        },
        {
@@ -13574,149 +14499,471 @@ static struct bpf_test tests[] = {
                        BPF_EMIT_CALL(BPF_FUNC_sk_release),
                        BPF_EXIT_INSN(),
                },
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-               .errstr = "Unreleased reference",
-               .result = REJECT,
-               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr = "Unreleased reference",
+               .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "reference tracking: alloc, check, free in both subbranches",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+                       /* if (offsetof(skb, mark) > data_len) exit; */
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_SK_LOOKUP,
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "reference tracking in call: free reference in subprog",
+               .insns = {
+                       BPF_SK_LOOKUP,
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+                       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
+       {
+               "pass modified ctx pointer to helper, 1",
+               .insns = {
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_csum_update),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = REJECT,
+               .errstr = "dereference of modified ctx ptr",
+       },
+       {
+               "pass modified ctx pointer to helper, 2",
+               .insns = {
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_get_socket_cookie),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result_unpriv = REJECT,
+               .result = REJECT,
+               .errstr_unpriv = "dereference of modified ctx ptr",
+               .errstr = "dereference of modified ctx ptr",
+       },
+       {
+               "pass modified ctx pointer to helper, 3",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_csum_update),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = REJECT,
+               .errstr = "variable ctx access var_off=(0x0; 0x4)",
+       },
+       {
+               "mov64 src == dst",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
+                       // Check bounds are OK
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
+       {
+               "mov64 src != dst",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+                       // Check bounds are OK
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
+       {
+               "allocated_stack",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
+                       BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .result_unpriv = ACCEPT,
+               .insn_processed = 15,
+       },
+       {
+               "masking, test out of bounds 1",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_1, 5),
+                       BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 2",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_1, 1),
+                       BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 3",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 4",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+                       BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 5",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_1, -1),
+                       BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 6",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_1, -1),
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 7",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 5),
+                       BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 8",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 9",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 10",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+                       BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
        },
        {
-               "reference tracking: alloc, check, free in both subbranches",
+               "masking, test out of bounds 11",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
-                       /* if (offsetof(skb, mark) > data_len) exit; */
-                       BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_EXIT_INSN(),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
-                                   offsetof(struct __sk_buff, mark)),
-                       BPF_SK_LOOKUP,
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-                       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+                       BPF_MOV64_IMM(BPF_REG_1, -1),
+                       BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-                       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "masking, test out of bounds 12",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, -1),
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
-               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+               .retval = 0,
        },
        {
-               "reference tracking in call: free reference in subprog",
+               "masking, test in bounds 1",
                .insns = {
-                       BPF_SK_LOOKUP,
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV32_IMM(BPF_REG_1, 4),
+                       BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
-
-                       /* subprog 1 */
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
-                       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+               },
+               .result = ACCEPT,
+               .retval = 4,
+       },
+       {
+               "masking, test in bounds 2",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_1, 0),
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .retval = 0,
        },
        {
-               "pass modified ctx pointer to helper, 1",
+               "masking, test in bounds 3",
                .insns = {
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
-                       BPF_MOV64_IMM(BPF_REG_2, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_csum_update),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-               .result = REJECT,
-               .errstr = "dereference of modified ctx ptr",
+               .result = ACCEPT,
+               .retval = 0xfffffffe,
        },
        {
-               "pass modified ctx pointer to helper, 2",
+               "masking, test in bounds 4",
                .insns = {
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_get_socket_cookie),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
+                       BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
-               .result_unpriv = REJECT,
-               .result = REJECT,
-               .errstr_unpriv = "dereference of modified ctx ptr",
-               .errstr = "dereference of modified ctx ptr",
+               .result = ACCEPT,
+               .retval = 0xabcde,
        },
        {
-               "pass modified ctx pointer to helper, 3",
+               "masking, test in bounds 5",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
-                       BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
-                       BPF_MOV64_IMM(BPF_REG_2, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_csum_update),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV32_IMM(BPF_REG_1, 0),
+                       BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-               .result = REJECT,
-               .errstr = "variable ctx access var_off=(0x0; 0x4)",
+               .result = ACCEPT,
+               .retval = 0,
        },
        {
-               "mov64 src == dst",
+               "masking, test in bounds 6",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
-                       // Check bounds are OK
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV32_IMM(BPF_REG_1, 46),
+                       BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .retval = 46,
        },
        {
-               "mov64 src != dst",
+               "masking, test in bounds 7",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_3, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
-                       // Check bounds are OK
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, -46),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+                       BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
                        BPF_EXIT_INSN(),
                },
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .retval = 46,
        },
        {
-               "allocated_stack",
+               "masking, test in bounds 8",
                .insns = {
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
-                       BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, -47),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+                       BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
                        BPF_EXIT_INSN(),
                },
                .result = ACCEPT,
-               .result_unpriv = ACCEPT,
-               .insn_processed = 15,
+               .retval = 0,
        },
        {
                "reference tracking in call: free reference in subprog and outside",
@@ -14388,6 +15635,166 @@ static struct bpf_test tests[] = {
                .result_unpriv = ACCEPT,
                .result = ACCEPT,
        },
+       {
+               "dead code: start",
+               .insns = {
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: mid 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: mid 2",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_get_prandom_u32),
+                       BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
+                       BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "dead code: end 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: end 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 12),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: end 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_0, 12),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+               },
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: tail of main + func",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 12),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: tail of main + two functions",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 12),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: function in the middle and mid of another func",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 7),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 12),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 7),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+               .retval = 7,
+       },
+       {
+               "dead code: middle of main before call",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 2),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
+                       BPF_MOV64_IMM(BPF_REG_1, 5),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+               .retval = 2,
+       },
+       {
+               "dead code: start of a function",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+               .retval = 2,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -14413,6 +15820,16 @@ static int create_map(uint32_t type, uint32_t size_key,
        return fd;
 }
 
+static void update_map(int fd, int index) /* seed element 'index' of array map 'fd' with known data */
+{
+	struct test_val value = {
+		.index = (6 + 1) * sizeof(int), /* presumably the byte offset of the slot after foo[6] — confirm against struct test_val */
+		.foo[6] = 0xabcdef12, /* sentinel value; NOTE(review): looks like tests using fixup_map_array_48b read this back — verify */
+	};
+
+	assert(!bpf_map_update_elem(fd, &index, &value, 0)); /* flags=0 (BPF_ANY: create or update); abort the test run on failure */
+}
+
 static int create_prog_dummy1(enum bpf_prog_type prog_type)
 {
        struct bpf_insn prog[] = {
@@ -14564,6 +15981,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        if (*fixup_map_array_48b) {
                map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                        sizeof(struct test_val), 1);
+               update_map(map_fds[3], 0);
                do {
                        prog[*fixup_map_array_48b].imm = map_fds[3];
                        fixup_map_array_48b++;