asedeno.scripts.mit.edu Git - linux.git/commitdiff
net/mlx5e: Tx, Enforce L4 inline copy when needed
author: Tariq Toukan <tariqt@mellanox.com>
Fri, 5 Jul 2019 15:30:16 +0000 (18:30 +0300)
committer: David S. Miller <davem@davemloft.net>
Fri, 5 Jul 2019 23:29:19 +0000 (16:29 -0700)
When ctrl->tisn field exists, this indicates an operation (HW offload)
on the TCP payload.
For such WQEs, inline the headers up to L4.

This is in preparation for kTLS HW offload support, added in
a downstream patch.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

index 7fdf69e08d58ff94331240d1d6bf83f13bcbcd89..bd41f89afef115286783e99e5b7cbc71fd1b16b6 100644 (file)
@@ -77,6 +77,11 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
        mlx5_write64((__be32 *)ctrl, uar_map);
 }
 
+static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
+{
+       return !!wqe->ctrl.tisn;
+}
+
 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 {
        struct mlx5_core_cq *mcq;
index dc77fe9ae3670d39d3f091b7b7b158e64423b0bf..b1a163e660530c408b025afc62d5070a434cc3c8 100644 (file)
@@ -304,9 +304,12 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
+               u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
+                       MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
+
                opcode    = MLX5_OPCODE_SEND;
                mss       = 0;
-               ihs       = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+               ihs       = mlx5e_calc_min_inline(mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                stats->packets++;
        }