A comparison instruction requires a subtraction. If the constant
is negative, we are more likely to fit it into an NFP instruction
directly if we change the sign and use addition instead.
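As an aside (illustration only, not JIT code): a minimal user-space C
sketch of the two's complement identity this relies on, and of why the
sign-flipped constant encodes more easily. The program and its values
are made up for demonstration:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t imm = -2;
            uint64_t x;

            /* Two's complement: subtracting imm equals adding -imm, so a
             * compare lowered with a subtraction can just as well be
             * lowered with an addition of the negated, now positive,
             * constant.
             */
            for (x = 0; x < 1000; x++)
                    assert(x - (uint64_t)(int64_t)imm ==
                           x + (uint64_t)-(int64_t)imm);

            /* Sign extended, -2 becomes 0xfffffffffffffffe and will not
             * fit a narrow immediate field; +2 trivially does.
             */
            printf("imm = %#llx, -imm = %#llx\n",
                   (unsigned long long)(int64_t)imm,
                   (unsigned long long)-(int64_t)imm);
            return 0;
    }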
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
+	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	if (!code)
		return -EINVAL;

+	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
+	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
+
	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
-		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
-		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(),
-			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+			 reg_a(reg + 1), carry_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
-			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+			 tmp_reg, carry_op, reg_a(reg + 1));

	emit_br(nfp_prog, code->br_mask, insn->off, 0);
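Aside (not part of the patch): the hunk above splits the 64-bit
comparison into a low-word ALU op plus a high-word op with carry, which
is why ALU_OP_SUB/ALU_OP_SUB_C flip together to ALU_OP_ADD/ALU_OP_ADD_C.
A plain C sketch of that split, with made-up helper names:

    #include <assert.h>
    #include <stdint.h>

    /* 64-bit subtract done as 32-bit sub on the low words followed by a
     * sub-with-borrow on the high words.
     */
    static uint64_t sub64_split(uint64_t a, uint64_t b)
    {
            uint32_t lo = (uint32_t)a - (uint32_t)b;
            uint32_t borrow = (uint32_t)a < (uint32_t)b;
            uint32_t hi = (uint32_t)(a >> 32) - (uint32_t)(b >> 32) - borrow;

            return (uint64_t)hi << 32 | lo;
    }

    /* The same split for addition, using add-with-carry on top. */
    static uint64_t add64_split(uint64_t a, uint64_t b)
    {
            uint32_t lo = (uint32_t)a + (uint32_t)b;
            uint32_t carry = lo < (uint32_t)a;
            uint32_t hi = (uint32_t)(a >> 32) + (uint32_t)(b >> 32) + carry;

            return (uint64_t)hi << 32 | lo;
    }

    int main(void)
    {
            uint64_t imm = (uint64_t)(int64_t)-2; /* sign-extended -2 */
            uint64_t a;

            for (a = 0; a < 5; a++) {
                    assert(sub64_split(a, imm) == a - imm);
                    assert(add64_split(a, -imm) == a + -imm);
                    /* subtracting imm == adding -imm, carry chain included */
                    assert(sub64_split(a, imm) == add64_split(a, -imm));
            }
            return 0;
    }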
			continue;

		if (BPF_CLASS(insn.code) != BPF_ALU &&
-		    BPF_CLASS(insn.code) != BPF_ALU64)
+		    BPF_CLASS(insn.code) != BPF_ALU64 &&
+		    BPF_CLASS(insn.code) != BPF_JMP)
			continue;
		if (BPF_SRC(insn.code) != BPF_K)
			continue;
		if (insn.imm >= 0)
			continue;

-		if (BPF_OP(insn.code) == BPF_ADD)
-			insn.code = BPF_CLASS(insn.code) | BPF_SUB;
-		else if (BPF_OP(insn.code) == BPF_SUB)
-			insn.code = BPF_CLASS(insn.code) | BPF_ADD;
-		else
-			continue;
+		if (BPF_CLASS(insn.code) == BPF_JMP) {
+			switch (BPF_OP(insn.code)) {
+			case BPF_JGE:
+			case BPF_JSGE:
+			case BPF_JLT:
+			case BPF_JSLT:
+				meta->jump_neg_op = true;
+				break;
+			default:
+				continue;
+			}
+		} else {
+			if (BPF_OP(insn.code) == BPF_ADD)
+				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
+			else if (BPF_OP(insn.code) == BPF_SUB)
+				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
+			else
+				continue;

-		meta->insn.code = insn.code | BPF_K;
+			meta->insn.code = insn.code | BPF_K;
+		}

		meta->insn.imm = -insn.imm;
	}
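For readers outside the JIT: the decision this pass makes for jump
instructions can be mirrored in user space with the uapi BPF macros. A
minimal sketch, assuming <linux/bpf.h>; the helper name is made up:

    #include <stdbool.h>
    #include <linux/bpf.h>

    /* Hypothetical helper mirroring the pass's test for jumps. */
    static bool can_flip_imm_sign(const struct bpf_insn *insn)
    {
            if (BPF_CLASS(insn->code) != BPF_JMP)
                    return false;
            if (BPF_SRC(insn->code) != BPF_K)
                    return false;
            if (insn->imm >= 0)
                    return false;

            switch (BPF_OP(insn->code)) {
            case BPF_JGE:
            case BPF_JSGE:
            case BPF_JLT:
            case BPF_JSLT:
                    return true;
            default:
                    return false;
            }
    }

Note the pass itself only flags the instruction (meta->jump_neg_op) and
negates the stored immediate; cmp_imm() above then selects ADD/ADD_C
over SUB/SUB_C at code generation time.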
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
+ * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
			bool xadd_maybe_16bit;
		};
		/* jump */
-		struct nfp_insn_meta *jmp_dst;
+		struct {
+			struct nfp_insn_meta *jmp_dst;
+			bool jump_neg_op;
+		};
		/* function calls */
		struct {
			u32 func_id;
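Design note on the last hunk (my reading, not stated in the patch):
wrapping jmp_dst in an anonymous struct lets the jump-related fields
grow as a group while still overlaying the other per-class members of
the enclosing union. A standalone sketch of the pattern, with
simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct insn_meta {                      /* simplified stand-in */
            union {
                    /* jump */
                    struct {
                            void *jmp_dst;
                            bool jump_neg_op;
                    };
                    /* function calls */
                    struct {
                            unsigned int func_id;
                    };
            };
    };

    int main(void)
    {
            struct insn_meta m = { .jmp_dst = NULL, .jump_neg_op = true };

            /* Both jump fields coexist; func_id overlays them. */
            printf("sizeof(struct insn_meta) = %zu\n", sizeof(m));
            return 0;
    }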