selftests/bpf: allow specifying helper for BPF_SK_LOOKUP
author Lorenz Bauer <lmb@cloudflare.com>
Fri, 22 Mar 2019 01:54:04 +0000 (09:54 +0800)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 22 Mar 2019 01:59:11 +0000 (18:59 -0700)
Make the BPF_SK_LOOKUP macro take the name of a helper function, to
ease writing tests for new helpers.
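
For example, a test that used to contain the bare macro now names the
helper it wants to call. A short illustration (sk_lookup_udp is shown
only as an example of another existing lookup helper):

    BPF_SK_LOOKUP(sk_lookup_tcp),  /* emits a call to BPF_FUNC_sk_lookup_tcp */
    BPF_SK_LOOKUP(sk_lookup_udp),  /* same macro, different helper */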

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/ref_tracking.c
tools/testing/selftests/bpf/verifier/unpriv.c

index 477a9dcf9ffff4b5b47a73759ba00d92d13b0106..19b5d03acc2a83d8425f5356dc53b2165ee96a1a 100644 (file)
@@ -198,7 +198,7 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
 }
 
 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
-#define BPF_SK_LOOKUP                                                  \
+#define BPF_SK_LOOKUP(func)                                            \
        /* struct bpf_sock_tuple tuple = {} */                          \
        BPF_MOV64_IMM(BPF_REG_2, 0),                                    \
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),                  \
@@ -207,13 +207,13 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),                \
-       /* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */       \
+       /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */                \
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),                           \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),                         \
        BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),        \
        BPF_MOV64_IMM(BPF_REG_4, 0),                                    \
        BPF_MOV64_IMM(BPF_REG_5, 0),                                    \
-       BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
+       BPF_EMIT_CALL(BPF_FUNC_ ## func)
 
 /* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
  * value into 0 and does necessary preparation for direct packet access
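
The macro's final instruction pastes its argument onto the BPF_FUNC_
prefix, so callers pass a bare helper name and the preprocessor forms
the enum value. A minimal sketch of the expansion (sk_lookup_udp used
purely as an example argument):

    BPF_SK_LOOKUP(sk_lookup_udp)
    /* ...emits the 13 instructions above, ending in: */
    BPF_EMIT_CALL(BPF_FUNC_sk_lookup_udp)
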
index 923f2110072d6f1f4a124824228c082d43f094d8..a6905e5017dcf6fda084a448ee0fb357bd273152 100644 (file)
@@ -1,7 +1,7 @@
 {
        "reference tracking: leak potential reference",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
        BPF_EXIT_INSN(),
        },
@@ -12,7 +12,7 @@
 {
        "reference tracking: leak potential reference on stack",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
@@ -26,7 +26,7 @@
 {
        "reference tracking: leak potential reference on stack 2",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
@@ -41,7 +41,7 @@
 {
        "reference tracking: zero potential reference",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
        BPF_EXIT_INSN(),
        },
@@ -52,7 +52,7 @@
 {
        "reference tracking: copy and zero potential references",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
@@ -65,7 +65,7 @@
 {
        "reference tracking: release reference without check",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        /* reference in r0 may be NULL */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -79,7 +79,7 @@
 {
        "reference tracking: release reference",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
@@ -91,7 +91,7 @@
 {
        "reference tracking: release reference 2",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
 {
        "reference tracking: release reference twice",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 {
        "reference tracking: release reference twice inside branch",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
                    offsetof(struct __sk_buff, mark)),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
        /* Leak reference in R0 */
        BPF_EXIT_INSN(),
        BPF_EXIT_INSN(),
        BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
                    offsetof(struct __sk_buff, mark)),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 {
        "reference tracking in call: free reference in subprog",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0),
 {
        "reference tracking in call: free reference in subprog and outside",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
 
        /* subprog 1 */
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        /* spill unchecked sk_ptr into stack of caller */
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_EXIT_INSN(),
 
        /* subprog 1 */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_EXIT_INSN(), /* return sk */
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        BPF_EXIT_INSN(),
 
        /* subprog 2 */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        BPF_EXIT_INSN(),
 
        /* subprog 2 */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        "reference tracking: allow LD_ABS",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        "reference tracking: forbid LD_ABS while holding reference",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_LD_ABS(BPF_B, 0),
        BPF_LD_ABS(BPF_H, 0),
        BPF_LD_ABS(BPF_W, 0),
        "reference tracking: allow LD_IND",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        "reference tracking: forbid LD_IND while holding reference",
        .insns = {
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_7, 1),
        BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
        "reference tracking: check reference or tail call",
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        /* if (sk) bpf_sk_release() */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
        "reference tracking: release reference then tail call",
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        /* if (sk) bpf_sk_release() */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
        /* Look up socket and store in REG_6 */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        /* bpf_tail_call() */
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_IMM(BPF_REG_3, 2),
        .insns = {
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
        /* Look up socket and store in REG_6 */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        /* if (!sk) goto end */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
 {
        "reference tracking: mangle and release sock_or_null",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 {
        "reference tracking: mangle and release sock",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
 {
        "reference tracking: access member",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
 {
        "reference tracking: write to member",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
 {
        "reference tracking: invalid 64-bit access of member",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
 {
        "reference tracking: access after release",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
 {
        "reference tracking: use ptr from bpf_tcp_sock() after release",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 {
        "reference tracking: use ptr from bpf_sk_fullsock() after release",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 {
        "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 {
        "reference tracking: use sk after bpf_sk_release(tp)",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 {
        "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 {
        "reference tracking: bpf_sk_release(listen_sk)",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
        "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
        .insns = {
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
index dbaf5be947b2be8ea3007cae023ef94be600a553..91bb77c24a2ef3bb392ce2763e90d7a05f2af34d 100644 (file)
        .insns = {
        BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
        /* struct bpf_sock *sock = bpf_sock_lookup(...); */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
        /* u64 foo; */
        /* void *target = &foo; */
        .insns = {
        BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
        /* struct bpf_sock *sock = bpf_sock_lookup(...); */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
        /* u64 foo; */
        /* void *target = &foo; */
        .insns = {
        BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
        /* struct bpf_sock *sock = bpf_sock_lookup(...); */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
        /* u64 foo; */
        /* void *target = &foo; */
        .insns = {
        BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
        /* struct bpf_sock *sock = bpf_sock_lookup(...); */
-       BPF_SK_LOOKUP,
+       BPF_SK_LOOKUP(sk_lookup_tcp),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
        /* u64 foo; */
        /* void *target = &foo; */