bpf: fix ldx in ld_abs rewrite for large offsets
author		Daniel Borkmann <daniel@iogearbox.net>
		Mon, 9 Jul 2018 22:43:22 +0000 (00:43 +0200)
committer	Alexei Starovoitov <ast@kernel.org>
		Tue, 10 Jul 2018 15:15:30 +0000 (08:15 -0700)
Mark reported that syzkaller triggered a KASAN-detected slab-out-of-bounds
bug in ___bpf_prog_run() with a BPF_LD | BPF_ABS word load at offset 0x8001.
After further investigation it became clear that the issue was in
BPF_LDX_MEM(), which takes the offset as an argument but cannot encode
offsets larger than S16_MAX into the instruction. For this synthetic case we
need to move the full address into the tmp register instead and do the LDX
without an immediate offset.
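
The limitation is visible straight from the instruction encoding: struct bpf_insn
carries the load displacement in a signed 16-bit off field, so BPF_LDX_MEM()
cannot represent anything above S16_MAX. A minimal userspace sketch of the
truncation (illustrative variable names, not kernel code):

	/* Sketch only: shows how an offset > S16_MAX wraps when forced into s16. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t offset  = 0x8001;           /* offset from the syzkaller repro   */
		int16_t  encoded = (int16_t)offset;  /* what the s16 off field would hold */

		printf("requested offset: %#x\n", offset);
		printf("encoded s16 off:  %d\n", encoded);  /* prints -32767 */
		return 0;
	}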

Fixes: e0cea7ce988c ("bpf: implement ld_abs/ld_ind in native bpf")
Reported-by: syzbot <syzkaller@googlegroups.com>
Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
net/core/filter.c

index 5fa66a33927fefca08b24357a3275727118e9083..a13f5b1f16364d87c96e17fbfd45291042abdec2 100644 (file)
@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              offset + ip_align % size == 0))) {
+               bool ldx_off_ok = offset <= S16_MAX;
+
                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
-               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
-               *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
-                                     offset);
+               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
+                                     size, 2 + endian + (!ldx_off_ok * 2));
+               if (ldx_off_ok) {
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_D, offset);
+               } else {
+                       *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
+                       *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_TMP, 0);
+               }
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
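
A note on the adjusted branch target: the BPF_JSLT bounds check jumps over the
whole fast path when the packet is too short. That path is the load sequence
(a single LDX when the offset fits, or MOV + ADD + LDX when it does not), an
optional byte swap, and the trailing BPF_JMP_A, which is exactly what
2 + endian + (!ldx_off_ok * 2) counts. For the reported case of a word load at
offset 0x8001 (where the BPF_FROM_BE conversion is emitted), that works out to
2 + 1 + 2 = 5 skipped instructions.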