asedeno.scripts.mit.edu Git - linux.git/commitdiff
net/mlx5e: Refactor RQ XDP_TX indication
Author: Tariq Toukan <tariqt@mellanox.com>
Tue, 12 Dec 2017 13:46:49 +0000 (15:46 +0200)
Committer: Saeed Mahameed <saeedm@mellanox.com>
Fri, 30 Mar 2018 23:55:05 +0000 (16:55 -0700)
Make the xdp_xmit indication available for Striding RQ
by taking it out of the type-specific union.
This refactor is a preparation for a downstream patch that
adds XDP support over Striding RQ.
In addition, use a bitmap instead of a boolean for possible
future flags.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index d26dd4bc89f487c85aaecbf1b3373d9f0599c8b1..a6ca54393bb6ea8509ccc6bf5860ad8960cca6a9 100644 (file)
@@ -479,6 +479,10 @@ typedef struct sk_buff *
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 
+enum mlx5e_rq_flag {
+       MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
+};
+
 struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll      wq;
@@ -489,7 +493,6 @@ struct mlx5e_rq {
                        u32 frag_sz;    /* max possible skb frag_sz */
                        union {
                                bool page_reuse;
-                               bool xdp_xmit;
                        };
                } wqe;
                struct {
@@ -528,6 +531,7 @@ struct mlx5e_rq {
        struct bpf_prog       *xdp_prog;
        unsigned int           hw_mtu;
        struct mlx5e_xdpsq     xdpsq;
+       DECLARE_BITMAP(flags, 8);
 
        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
index 07db8a58d0a2e033792bd414f700a2b1ed1b6b1c..a827571deb858733dd304e10df691d966beb3a16 100644 (file)
@@ -788,7 +788,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
        /* move page to reference to sq responsibility,
         * and mark so it's not put back in page-cache.
         */
-       rq->wqe.xdp_xmit = true;
+       __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
        sq->db.di[pi] = *di;
        sq->pc++;
 
@@ -913,9 +913,8 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
-               if (rq->wqe.xdp_xmit) {
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        wi->di.page = NULL;
-                       rq->wqe.xdp_xmit = false;
                        /* do not return page to cache, it will be returned on XDP_TX completion */
                        goto wq_ll_pop;
                }
@@ -955,9 +954,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
        skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
-               if (rq->wqe.xdp_xmit) {
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        wi->di.page = NULL;
-                       rq->wqe.xdp_xmit = false;
                        /* do not return page to cache, it will be returned on XDP_TX completion */
                        goto wq_ll_pop;
                }