bpf: migrate ebpf ld_abs/ld_ind tests to test_verifier
author     Daniel Borkmann <daniel@iogearbox.net>
           Thu, 3 May 2018 23:08:13 +0000 (01:08 +0200)
committer  Alexei Starovoitov <ast@kernel.org>
           Thu, 3 May 2018 23:49:19 +0000 (16:49 -0700)
Remove all eBPF tests involving LD_ABS/LD_IND from test_bpf.ko. The reason
is that the eBPF tests in the test_bpf module do not go through the BPF
verifier, so any instruction rewrites by the verifier cannot take place.

Therefore, move them into test_verifier, which runs from user space, so
that the verifier can rewrite LD_ABS/LD_IND internally in upcoming patches.
This has the same effect since runtime tests are also performed from there.
It also allows us to finally unexport bpf_skb_vlan_{push,pop}_proto and
keep them internal to the core kernel.

Additionally, add further cBPF LD_ABS/LD_IND test coverage to the
test_bpf.ko suite.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
lib/test_bpf.c
net/core/filter.c
tools/testing/selftests/bpf/test_verifier.c
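
For reference, the migrated runtime tests follow test_verifier's struct
bpf_test layout rather than test_bpf's. A minimal sketch of such an entry
(hypothetical test name and values, mirroring the fields used by the
entries added in the diff below) looks like this:

	{
		/* hypothetical example, not part of this commit */
		"ld_abs: minimal example",
		.insns = {
			/* LD_ABS expects the skb context in R6 */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			/* load one byte from packet offset 0 into R0 */
			BPF_LD_ABS(BPF_B, 0),
			BPF_EXIT_INSN(),
		},
		.data = {
			0x34,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0x34,	/* data[0] returned in R0 */
	},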

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 68ecdb4eea09a05079312748a4227582b57c640d..d0e3d7ef36a831300660af14746e704f3981ec4c 100644
@@ -714,8 +714,6 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 extern const struct bpf_func_proto bpf_get_stackid_proto;
 extern const struct bpf_func_proto bpf_get_stack_proto;
 extern const struct bpf_func_proto bpf_sock_map_update_proto;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 8e157806df7a6d78fed7afc63787d8517fbcc976..317f231462d4d98b004432e439a98e2d5e2a8a55 100644
@@ -386,116 +386,6 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
        return 0;
 }
 
-#define PUSH_CNT 68
-/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
-static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
-{
-       unsigned int len = BPF_MAXINSNS;
-       struct bpf_insn *insn;
-       int i = 0, j, k = 0;
-
-       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
-       if (!insn)
-               return -ENOMEM;
-
-       insn[i++] = BPF_MOV64_REG(R6, R1);
-loop:
-       for (j = 0; j < PUSH_CNT; j++) {
-               insn[i++] = BPF_LD_ABS(BPF_B, 0);
-               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
-               i++;
-               insn[i++] = BPF_MOV64_REG(R1, R6);
-               insn[i++] = BPF_MOV64_IMM(R2, 1);
-               insn[i++] = BPF_MOV64_IMM(R3, 2);
-               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                        bpf_skb_vlan_push_proto.func - __bpf_call_base);
-               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
-               i++;
-       }
-
-       for (j = 0; j < PUSH_CNT; j++) {
-               insn[i++] = BPF_LD_ABS(BPF_B, 0);
-               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
-               i++;
-               insn[i++] = BPF_MOV64_REG(R1, R6);
-               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                        bpf_skb_vlan_pop_proto.func - __bpf_call_base);
-               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
-               i++;
-       }
-       if (++k < 5)
-               goto loop;
-
-       for (; i < len - 1; i++)
-               insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
-
-       insn[len - 1] = BPF_EXIT_INSN();
-
-       self->u.ptr.insns = insn;
-       self->u.ptr.len = len;
-
-       return 0;
-}
-
-static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
-{
-       struct bpf_insn *insn;
-
-       insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
-       if (!insn)
-               return -ENOMEM;
-
-       /* Due to func address being non-const, we need to
-        * assemble this here.
-        */
-       insn[0] = BPF_MOV64_REG(R6, R1);
-       insn[1] = BPF_LD_ABS(BPF_B, 0);
-       insn[2] = BPF_LD_ABS(BPF_H, 0);
-       insn[3] = BPF_LD_ABS(BPF_W, 0);
-       insn[4] = BPF_MOV64_REG(R7, R6);
-       insn[5] = BPF_MOV64_IMM(R6, 0);
-       insn[6] = BPF_MOV64_REG(R1, R7);
-       insn[7] = BPF_MOV64_IMM(R2, 1);
-       insn[8] = BPF_MOV64_IMM(R3, 2);
-       insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                              bpf_skb_vlan_push_proto.func - __bpf_call_base);
-       insn[10] = BPF_MOV64_REG(R6, R7);
-       insn[11] = BPF_LD_ABS(BPF_B, 0);
-       insn[12] = BPF_LD_ABS(BPF_H, 0);
-       insn[13] = BPF_LD_ABS(BPF_W, 0);
-       insn[14] = BPF_MOV64_IMM(R0, 42);
-       insn[15] = BPF_EXIT_INSN();
-
-       self->u.ptr.insns = insn;
-       self->u.ptr.len = 16;
-
-       return 0;
-}
-
-static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
-{
-       unsigned int len = BPF_MAXINSNS;
-       struct bpf_insn *insn;
-       int i = 0;
-
-       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
-       if (!insn)
-               return -ENOMEM;
-
-       insn[i++] = BPF_MOV64_REG(R6, R1);
-       insn[i++] = BPF_LD_ABS(BPF_B, 0);
-       insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
-       i++;
-       while (i < len - 1)
-               insn[i++] = BPF_LD_ABS(BPF_B, 1);
-       insn[i] = BPF_EXIT_INSN();
-
-       self->u.ptr.insns = insn;
-       self->u.ptr.len = len;
-
-       return 0;
-}
-
 static int __bpf_fill_stxdw(struct bpf_test *self, int size)
 {
        unsigned int len = BPF_MAXINSNS;
@@ -1987,40 +1877,6 @@ static struct bpf_test tests[] = {
                { },
                { { 0, -1 } }
        },
-       {
-               "INT: DIV + ABS",
-               .u.insns_int = {
-                       BPF_ALU64_REG(BPF_MOV, R6, R1),
-                       BPF_LD_ABS(BPF_B, 3),
-                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
-                       BPF_ALU32_REG(BPF_DIV, R0, R2),
-                       BPF_ALU64_REG(BPF_MOV, R8, R0),
-                       BPF_LD_ABS(BPF_B, 4),
-                       BPF_ALU64_REG(BPF_ADD, R8, R0),
-                       BPF_LD_IND(BPF_B, R8, -70),
-                       BPF_EXIT_INSN(),
-               },
-               INTERNAL,
-               { 10, 20, 30, 40, 50 },
-               { { 4, 0 }, { 5, 10 } }
-       },
-       {
-               /* This one doesn't go through verifier, but is just raw insn
-                * as opposed to cBPF tests from here. Thus div by 0 tests are
-                * done in test_verifier in BPF kselftests.
-                */
-               "INT: DIV by -1",
-               .u.insns_int = {
-                       BPF_ALU64_REG(BPF_MOV, R6, R1),
-                       BPF_ALU64_IMM(BPF_MOV, R7, -1),
-                       BPF_LD_ABS(BPF_B, 3),
-                       BPF_ALU32_REG(BPF_DIV, R0, R7),
-                       BPF_EXIT_INSN(),
-               },
-               INTERNAL,
-               { 10, 20, 30, 40, 50 },
-               { { 3, 0 }, { 4, 0 } }
-       },
        {
                "check: missing ret",
                .u.insns = {
@@ -2383,50 +2239,6 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } }
        },
-       {
-               "nmap reduced",
-               .u.insns_int = {
-                       BPF_MOV64_REG(R6, R1),
-                       BPF_LD_ABS(BPF_H, 12),
-                       BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
-                       BPF_LD_ABS(BPF_H, 12),
-                       BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
-                       BPF_MOV32_IMM(R0, 18),
-                       BPF_STX_MEM(BPF_W, R10, R0, -64),
-                       BPF_LDX_MEM(BPF_W, R7, R10, -64),
-                       BPF_LD_IND(BPF_W, R7, 14),
-                       BPF_STX_MEM(BPF_W, R10, R0, -60),
-                       BPF_MOV32_IMM(R0, 280971478),
-                       BPF_STX_MEM(BPF_W, R10, R0, -56),
-                       BPF_LDX_MEM(BPF_W, R7, R10, -56),
-                       BPF_LDX_MEM(BPF_W, R0, R10, -60),
-                       BPF_ALU32_REG(BPF_SUB, R0, R7),
-                       BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
-                       BPF_LD_ABS(BPF_H, 12),
-                       BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
-                       BPF_MOV32_IMM(R0, 22),
-                       BPF_STX_MEM(BPF_W, R10, R0, -56),
-                       BPF_LDX_MEM(BPF_W, R7, R10, -56),
-                       BPF_LD_IND(BPF_H, R7, 14),
-                       BPF_STX_MEM(BPF_W, R10, R0, -52),
-                       BPF_MOV32_IMM(R0, 17366),
-                       BPF_STX_MEM(BPF_W, R10, R0, -48),
-                       BPF_LDX_MEM(BPF_W, R7, R10, -48),
-                       BPF_LDX_MEM(BPF_W, R0, R10, -52),
-                       BPF_ALU32_REG(BPF_SUB, R0, R7),
-                       BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
-                       BPF_MOV32_IMM(R0, 256),
-                       BPF_EXIT_INSN(),
-                       BPF_MOV32_IMM(R0, 0),
-                       BPF_EXIT_INSN(),
-               },
-               INTERNAL,
-               { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
-                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
-               { { 38, 256 } },
-               .stack_depth = 64,
-       },
        /* BPF_ALU | BPF_MOV | BPF_X */
        {
                "ALU_MOV_X: dst = 2",
@@ -5485,22 +5297,6 @@ static struct bpf_test tests[] = {
                { { 1, 0xbee } },
                .fill_helper = bpf_fill_ld_abs_get_processor_id,
        },
-       {
-               "BPF_MAXINSNS: ld_abs+vlan_push/pop",
-               { },
-               INTERNAL,
-               { 0x34 },
-               { { ETH_HLEN, 0xbef } },
-               .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
-       },
-       {
-               "BPF_MAXINSNS: jump around ld_abs",
-               { },
-               INTERNAL,
-               { 10, 11 },
-               { { 2, 10 } },
-               .fill_helper = bpf_fill_jump_around_ld_abs,
-       },
        /*
         * LD_IND / LD_ABS on fragmented SKBs
         */
@@ -5682,6 +5478,53 @@ static struct bpf_test tests[] = {
                { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
                { {0x40, 0x05 } },
        },
+       {
+               "LD_IND byte positive offset, all ff",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+               { {0x40, 0xff } },
+       },
+       {
+               "LD_IND byte positive offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LD_IND byte negative offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 } },
+       },
+       {
+               "LD_IND byte negative offset, multiple calls",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x82 }, },
+       },
        {
                "LD_IND halfword positive offset",
                .u.insns = {
@@ -5730,6 +5573,39 @@ static struct bpf_test tests[] = {
                },
                { {0x40, 0x66cc } },
        },
+       {
+               "LD_IND halfword positive offset, all ff",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+               { {0x40, 0xffff } },
+       },
+       {
+               "LD_IND halfword positive offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LD_IND halfword negative offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 } },
+       },
        {
                "LD_IND word positive offset",
                .u.insns = {
@@ -5820,6 +5696,39 @@ static struct bpf_test tests[] = {
                },
                { {0x40, 0x66cc77dd } },
        },
+       {
+               "LD_IND word positive offset, all ff",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+               { {0x40, 0xffffffff } },
+       },
+       {
+               "LD_IND word positive offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LD_IND word negative offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 } },
+       },
        {
                "LD_ABS byte",
                .u.insns = {
@@ -5837,6 +5746,68 @@ static struct bpf_test tests[] = {
                },
                { {0x40, 0xcc } },
        },
+       {
+               "LD_ABS byte positive offset, all ff",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+               { {0x40, 0xff } },
+       },
+       {
+               "LD_ABS byte positive offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LD_ABS byte negative offset, out of bounds load",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_EXPECTED_FAIL,
+               .expected_errcode = -EINVAL,
+       },
+       {
+               "LD_ABS byte negative offset, in bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x82 }, },
+       },
+       {
+               "LD_ABS byte negative offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LD_ABS byte negative offset, multiple calls",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x82 }, },
+       },
        {
                "LD_ABS halfword",
                .u.insns = {
@@ -5871,6 +5842,55 @@ static struct bpf_test tests[] = {
                },
                { {0x40, 0x99ff } },
        },
+       {
+               "LD_ABS halfword positive offset, all ff",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+               { {0x40, 0xffff } },
+       },
+       {
+               "LD_ABS halfword positive offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LD_ABS halfword negative offset, out of bounds load",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_EXPECTED_FAIL,
+               .expected_errcode = -EINVAL,
+       },
+       {
+               "LD_ABS halfword negative offset, in bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x1982 }, },
+       },
+       {
+               "LD_ABS halfword negative offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
        {
                "LD_ABS word",
                .u.insns = {
@@ -5939,6 +5959,140 @@ static struct bpf_test tests[] = {
                },
                { {0x40, 0x88ee99ff } },
        },
+       {
+               "LD_ABS word positive offset, all ff",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+               { {0x40, 0xffffffff } },
+       },
+       {
+               "LD_ABS word positive offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LD_ABS word negative offset, out of bounds load",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_EXPECTED_FAIL,
+               .expected_errcode = -EINVAL,
+       },
+       {
+               "LD_ABS word negative offset, in bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x25051982 }, },
+       },
+       {
+               "LD_ABS word negative offset, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x3f, 0 }, },
+       },
+       {
+               "LDX_MSH standalone, preserved A",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0xffeebbaa }, },
+       },
+       {
+               "LDX_MSH standalone, preserved A 2",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x175e9d63 }, },
+       },
+       {
+               "LDX_MSH standalone, test result 1",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x14 }, },
+       },
+       {
+               "LDX_MSH standalone, test result 2",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x24 }, },
+       },
+       {
+               "LDX_MSH standalone, negative offset",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0 }, },
+       },
+       {
+               "LDX_MSH standalone, negative offset 2",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x24 }, },
+       },
+       {
+               "LDX_MSH standalone, out of bounds",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+                       BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0 }, },
+       },
        /*
         * verify that the interpreter or JIT correctly sets A and X
         * to 0.
@@ -6127,14 +6281,6 @@ static struct bpf_test tests[] = {
                {},
                { {0x1, 0x42 } },
        },
-       {
-               "LD_ABS with helper changing skb data",
-               { },
-               INTERNAL,
-               { 0x34 },
-               { { ETH_HLEN, 42 } },
-               .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
-       },
        /* Checking interpreter vs JIT wrt signed extended imms. */
        {
                "JNE signed compare, test 1",
diff --git a/net/core/filter.c b/net/core/filter.c
index c33595a8d6042318e54304447b083aec34d90b25..865500f6180de026223bcc1f67a1cd19866b4702 100644
@@ -2181,7 +2181,7 @@ BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
        return ret;
 }
 
-const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
        .func           = bpf_skb_vlan_push,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -2189,7 +2189,6 @@ const struct bpf_func_proto bpf_skb_vlan_push_proto = {
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
 };
-EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
 
 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
 {
@@ -2203,13 +2202,12 @@ BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
        return ret;
 }
 
-const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
        .func           = bpf_skb_vlan_pop,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
 };
-EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
 
 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
 {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 1acafe26498b797520005cf35ca3db5f9fd4c4b8..275b4570b5b81386940c5ef53b03a950d7d0c083 100644
@@ -47,7 +47,7 @@
 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 #endif
 
-#define MAX_INSNS      512
+#define MAX_INSNS      BPF_MAXINSNS
 #define MAX_FIXUPS     8
 #define MAX_NR_MAPS    4
 #define POINTER_VALUE  0xcafe4all
@@ -77,6 +77,8 @@ struct bpf_test {
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
        uint8_t flags;
+       __u8 data[TEST_DATA_LEN];
+       void (*fill_helper)(struct bpf_test *self);
 };
 
 /* Note we want this to be 64 bit aligned so that the end of our array is
@@ -94,6 +96,62 @@ struct other_val {
        long long bar;
 };
 
+static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+       /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+#define PUSH_CNT 51
+       unsigned int len = BPF_MAXINSNS;
+       struct bpf_insn *insn = self->insns;
+       int i = 0, j, k = 0;
+
+       insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+loop:
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
+               i++;
+               insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
+               insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
+               insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        BPF_FUNC_skb_vlan_push),
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
+               i++;
+       }
+
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
+               i++;
+               insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        BPF_FUNC_skb_vlan_pop),
+               insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
+               i++;
+       }
+       if (++k < 5)
+               goto loop;
+
+       for (; i < len - 1; i++)
+               insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
+       insn[len - 1] = BPF_EXIT_INSN();
+}
+
+static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+{
+       struct bpf_insn *insn = self->insns;
+       unsigned int len = BPF_MAXINSNS;
+       int i = 0;
+
+       insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+       insn[i++] = BPF_LD_ABS(BPF_B, 0);
+       insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
+       i++;
+       while (i < len - 1)
+               insn[i++] = BPF_LD_ABS(BPF_B, 1);
+       insn[i] = BPF_EXIT_INSN();
+}
+
 static struct bpf_test tests[] = {
        {
                "add+sub+mul",
@@ -11725,6 +11783,197 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
+       {
+               "ld_abs: invalid op 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_DW, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = REJECT,
+               .errstr = "unknown opcode",
+       },
+       {
+               "ld_abs: invalid op 2",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_0, 256),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = REJECT,
+               .errstr = "unknown opcode",
+       },
+       {
+               "ld_abs: nmap reduced",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_H, 12),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
+                       BPF_LD_ABS(BPF_H, 12),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
+                       BPF_MOV32_IMM(BPF_REG_0, 18),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
+                       BPF_LD_IND(BPF_W, BPF_REG_7, 14),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
+                       BPF_MOV32_IMM(BPF_REG_0, 280971478),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
+                       BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
+                       BPF_LD_ABS(BPF_H, 12),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
+                       BPF_MOV32_IMM(BPF_REG_0, 22),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+                       BPF_LD_IND(BPF_H, BPF_REG_7, 14),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
+                       BPF_MOV32_IMM(BPF_REG_0, 17366),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
+                       BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV32_IMM(BPF_REG_0, 256),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                       0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 256,
+       },
+       {
+               "ld_abs: div + abs, test 1",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_ABS(BPF_B, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 10,
+       },
+       {
+               "ld_abs: div + abs, test 2",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_ABS(BPF_B, 128),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+                       BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "ld_abs: div + abs, test 3",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "ld_abs: div + abs, test 4",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+                       BPF_LD_ABS(BPF_B, 256),
+                       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       10, 20, 30, 40, 50,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "ld_abs: vlan + abs, test 1",
+               .insns = { },
+               .data = {
+                       0x34,
+               },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 0xbef,
+       },
+       {
+               "ld_abs: vlan + abs, test 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_6, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_vlan_push),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .data = {
+                       0x34,
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 42,
+       },
+       {
+               "ld_abs: jump around ld_abs",
+               .insns = { },
+               .data = {
+                       10, 11,
+               },
+               .fill_helper = bpf_fill_jump_around_ld_abs,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+               .retval = 10,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -11828,7 +12077,7 @@ static int create_map_in_map(void)
        return outer_map_fd;
 }
 
-static char bpf_vlog[32768];
+static char bpf_vlog[UINT_MAX >> 8];
 
 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
                          int *map_fds)
@@ -11839,6 +12088,9 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
        int *fixup_prog = test->fixup_prog;
        int *fixup_map_in_map = test->fixup_map_in_map;
 
+       if (test->fill_helper)
+               test->fill_helper(test);
+
        /* Allocating HTs with 1 elem is fine here, since we only test
         * for verifier and not do a runtime lookup, so the only thing
         * that really matters is value size in this case.
@@ -11888,10 +12140,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
 {
        int fd_prog, expected_ret, reject_from_alignment;
+       int prog_len, prog_type = test->prog_type;
        struct bpf_insn *prog = test->insns;
-       int prog_len = probe_filter_length(prog);
-       char data_in[TEST_DATA_LEN] = {};
-       int prog_type = test->prog_type;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
        uint32_t retval;
@@ -11901,6 +12151,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                map_fds[i] = -1;
 
        do_test_fixup(test, prog, map_fds);
+       prog_len = probe_filter_length(prog);
 
        fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
                                     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
@@ -11940,8 +12191,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
        }
 
        if (fd_prog >= 0) {
-               err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
-                                       NULL, NULL, &retval, NULL);
+               err = bpf_prog_test_run(fd_prog, 1, test->data,
+                                       sizeof(test->data), NULL, NULL,
+                                       &retval, NULL);
                if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
                        printf("Unexpected bpf_prog_test_run error\n");
                        goto fail_log;