asedeno.scripts.mit.edu Git - linux.git/commitdiff
nfp: bpf: optimize add/sub of a negative constant
author: Jakub Kicinski <jakub.kicinski@netronome.com>
Wed, 25 Apr 2018 04:22:37 +0000 (21:22 -0700)
committer: Daniel Borkmann <daniel@iogearbox.net>
Wed, 25 Apr 2018 07:56:10 +0000 (09:56 +0200)
NFP instruction set can fit small immediates into the instruction.
Negative integers, however, will never fit because they will have the
highest bit set.  If we swap the ALU op between ADD and SUB and
negate the constant we have a better chance of fitting small negative
integers into the instruction itself and saving one or two cycles.

immed[gprB_21, 0xfffffffc]
alu[gprA_4, gprA_4, +, gprB_21], gpr_wrboth
immed[gprB_21, 0xffffffff]
alu[gprA_5, gprA_5, +carry, gprB_21], gpr_wrboth

now becomes:

alu[gprA_4, gprA_4, -, 4], gpr_wrboth
alu[gprA_5, gprA_5, -carry, 0], gpr_wrboth

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
drivers/net/ethernet/netronome/nfp/bpf/jit.c

index 9cc638718272ad0935b5056c9a9b61a552c28ff6..a5590988fc699e9c20c92dbd75e04523be1ee0d2 100644 (file)
@@ -2777,6 +2777,40 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
        }
 }
 
+/* abs(insn.imm) will fit better into unrestricted reg immediate -
+ * convert add/sub of a negative number into a sub/add of a positive one.
+ */
+static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
+{
+       struct nfp_insn_meta *meta;
+
+       list_for_each_entry(meta, &nfp_prog->insns, l) {
+               struct bpf_insn insn = meta->insn;
+
+               /* Skip instructions already marked dead/elided by other passes. */
+               if (meta->skip)
+                       continue;
+
+               /* Only 32-bit (BPF_ALU) and 64-bit (BPF_ALU64) ALU ops apply. */
+               if (BPF_CLASS(insn.code) != BPF_ALU &&
+                   BPF_CLASS(insn.code) != BPF_ALU64)
+                       continue;
+               /* Only the immediate-operand (BPF_K) form can be rewritten. */
+               if (BPF_SRC(insn.code) != BPF_K)
+                       continue;
+               /* Non-negative immediates may already fit; nothing to gain. */
+               if (insn.imm >= 0)
+                       continue;
+
+               /* Flip ADD <-> SUB, preserving the class (ALU vs ALU64).
+                * Any other ALU op is left untouched.
+                */
+               if (BPF_OP(insn.code) == BPF_ADD)
+                       insn.code = BPF_CLASS(insn.code) | BPF_SUB;
+               else if (BPF_OP(insn.code) == BPF_SUB)
+                       insn.code = BPF_CLASS(insn.code) | BPF_ADD;
+               else
+                       continue;
+
+               meta->insn.code = insn.code | BPF_K;
+
+               /* NOTE(review): -insn.imm overflows signed int when
+                * insn.imm == INT_MIN (0x80000000); presumably such an
+                * immediate either cannot reach here or the wrap-around is
+                * benign for the NFP encoding — confirm.
+                */
+               meta->insn.imm = -insn.imm;
+       }
+}
+
 /* Remove masking after load since our load guarantees this is not needed */
 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
 {
@@ -3212,6 +3246,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
 {
        nfp_bpf_opt_reg_init(nfp_prog);
 
+       nfp_bpf_opt_neg_add_sub(nfp_prog);
        nfp_bpf_opt_ld_mask(nfp_prog);
        nfp_bpf_opt_ld_shift(nfp_prog);
        nfp_bpf_opt_ldst_gather(nfp_prog);