asedeno.scripts.mit.edu Git - linux.git/commitdiff
nfp: bpf: optimize comparisons to negative constants
author: Jakub Kicinski <jakub.kicinski@netronome.com>
Wed, 25 Apr 2018 04:22:39 +0000 (21:22 -0700)
committer: Daniel Borkmann <daniel@iogearbox.net>
Wed, 25 Apr 2018 07:56:10 +0000 (09:56 +0200)
Comparison instruction requires a subtraction.  If the constant
is negative we are more likely to fit it into a NFP instruction
directly if we change the sign and use addition.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/netronome/nfp/bpf/main.h

index 5b8da7a67df46757fbf00178307b7beb5c039299..65f0791cae0c6c16cd17cc129eaded38bdb2d7f4 100644 (file)
@@ -1247,6 +1247,7 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */
        const struct jmp_code_map *code;
+       enum alu_op alu_op, carry_op;
        u8 reg = insn->dst_reg * 2;
        swreg tmp_reg;
 
@@ -1254,19 +1255,22 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        if (!code)
                return -EINVAL;
 
+       alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
+       carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
+
        tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
        if (!code->swap)
-               emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+               emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
        else
-               emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+               emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
 
        tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
        if (!code->swap)
                emit_alu(nfp_prog, reg_none(),
-                        reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+                        reg_a(reg + 1), carry_op, tmp_reg);
        else
                emit_alu(nfp_prog, reg_none(),
-                        tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+                        tmp_reg, carry_op, reg_a(reg + 1));
 
        emit_br(nfp_prog, code->br_mask, insn->off, 0);
 
@@ -2745,21 +2749,35 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
                        continue;
 
                if (BPF_CLASS(insn.code) != BPF_ALU &&
-                   BPF_CLASS(insn.code) != BPF_ALU64)
+                   BPF_CLASS(insn.code) != BPF_ALU64 &&
+                   BPF_CLASS(insn.code) != BPF_JMP)
                        continue;
                if (BPF_SRC(insn.code) != BPF_K)
                        continue;
                if (insn.imm >= 0)
                        continue;
 
-               if (BPF_OP(insn.code) == BPF_ADD)
-                       insn.code = BPF_CLASS(insn.code) | BPF_SUB;
-               else if (BPF_OP(insn.code) == BPF_SUB)
-                       insn.code = BPF_CLASS(insn.code) | BPF_ADD;
-               else
-                       continue;
+               if (BPF_CLASS(insn.code) == BPF_JMP) {
+                       switch (BPF_OP(insn.code)) {
+                       case BPF_JGE:
+                       case BPF_JSGE:
+                       case BPF_JLT:
+                       case BPF_JSLT:
+                               meta->jump_neg_op = true;
+                               break;
+                       default:
+                               continue;
+                       }
+               } else {
+                       if (BPF_OP(insn.code) == BPF_ADD)
+                               insn.code = BPF_CLASS(insn.code) | BPF_SUB;
+                       else if (BPF_OP(insn.code) == BPF_SUB)
+                               insn.code = BPF_CLASS(insn.code) | BPF_ADD;
+                       else
+                               continue;
 
-               meta->insn.code = insn.code | BPF_K;
+                       meta->insn.code = insn.code | BPF_K;
+               }
 
                meta->insn.imm = -insn.imm;
        }
index 4981c8944ca35accd3ddc4c34c77c8a56309f2d5..68b5d326483dae7aa17b7ea9b1d8f6e083f7b7c3 100644 (file)
@@ -236,6 +236,7 @@ struct nfp_bpf_reg_state {
  * @xadd_over_16bit: 16bit immediate is not guaranteed
  * @xadd_maybe_16bit: 16bit immediate is possible
  * @jmp_dst: destination info for jump instructions
+ * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
  * @func_id: function id for call instructions
  * @arg1: arg1 for call instructions
  * @arg2: arg2 for call instructions
@@ -264,7 +265,10 @@ struct nfp_insn_meta {
                        bool xadd_maybe_16bit;
                };
                /* jump */
-               struct nfp_insn_meta *jmp_dst;
+               struct {
+                       struct nfp_insn_meta *jmp_dst;
+                       bool jump_neg_op;
+               };
                /* function calls */
                struct {
                        u32 func_id;