selftests/bpf: Add verifier tests for better jmp32 register bounds
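
All three tests added below share the same shape: an unknown scalar comes back from bpf_get_cgroup_classid(), a 32-bit conditional jump (JGT or JLE, against an immediate or another register) bounds its lower half to [0, 1], and that bounded value is then used as the offset of a 1-byte store into a 48-byte map value. As a rough C-level sketch of the pattern the verifier must accept (map name, section name and program layout are invented here purely for illustration; the actual tests are the raw instruction arrays in the diff):

/* Illustrative sketch only -- not part of the patch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val48 { char data[48]; };

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct val48);
} map_48b SEC(".maps");

SEC("tc")
int jmp32_bound_deduction(struct __sk_buff *skb)
{
	long long key = 0;
	struct val48 *val = bpf_map_lookup_elem(&map_48b, &key);
	__u64 classid;

	if (!val)
		return 0;

	/* Unknown 64-bit scalar returned by a helper call. */
	classid = bpf_get_cgroup_classid(skb);

	/* 32-bit comparison: only the lower 32 bits get bounded. */
	if ((__u32)classid > 1)
		return 0;

	/* After zero-extending the low half, the verifier must deduce
	 * an offset range of [0, 1] so this store stays in bounds.
	 */
	val->data[(__u32)classid] = 0;
	return 0;
}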
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index f0961c58581ea98036a725904bb2268c6faf6afe..bf0322eb53464d41bb6d38098f8d95fc2c2536a7 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
        .result = ACCEPT,
        .retval = 2,
 },
+{
+       "jgt32: range bound deduction, reg op imm",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_JMP32_IMM(BPF_JGT, BPF_REG_0, 1, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "jgt32: range bound deduction, reg1 op reg2, reg1 unknown",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_MOV32_IMM(BPF_REG_2, 1),
+       BPF_JMP32_REG(BPF_JGT, BPF_REG_0, BPF_REG_2, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "jle32: range bound deduction, reg1 op reg2, reg2 unknown",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_MOV32_IMM(BPF_REG_2, 1),
+       BPF_JMP32_REG(BPF_JLE, BPF_REG_2, BPF_REG_0, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},