net: mvneta: rely on page_pool_recycle_direct in mvneta_run_xdp
author Lorenzo Bianconi <lorenzo@kernel.org>
Wed, 20 Nov 2019 14:54:17 +0000 (16:54 +0200)
committer David S. Miller <davem@davemloft.net>
Wed, 20 Nov 2019 20:34:17 +0000 (12:34 -0800)
Rely on page_pool_recycle_direct and not on xdp_return_buff in
mvneta_run_xdp. This is a preliminary patch to limit the DMA sync
length to the one strictly necessary.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/marvell/mvneta.c

index 12e03b15f0ab2e8a82691631e94e55aeb48af262..f7713c2c68e1cbb8f6906e0f6db2ee3c7d99d7bf 100644 (file)
@@ -2097,7 +2097,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                err = xdp_do_redirect(pp->dev, xdp, prog);
                if (err) {
                        ret = MVNETA_XDP_DROPPED;
-                       xdp_return_buff(xdp);
+                       page_pool_recycle_direct(rxq->page_pool,
+                                                virt_to_head_page(xdp->data));
                } else {
                        ret = MVNETA_XDP_REDIR;
                }
@@ -2106,7 +2107,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
        case XDP_TX:
                ret = mvneta_xdp_xmit_back(pp, xdp);
                if (ret != MVNETA_XDP_TX)
-                       xdp_return_buff(xdp);
+                       page_pool_recycle_direct(rxq->page_pool,
+                                                virt_to_head_page(xdp->data));
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
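
For context, a minimal sketch of the patched hot path, assuming the surrounding
mvneta_run_xdp() structure at this point in the tree. The _sketch name, the
bpf_prog_run_xdp() call placement and the simplified default/drop handling are
illustrative assumptions; MVNETA_XDP_* return codes, mvneta_xdp_xmit_back(),
page_pool_recycle_direct() and virt_to_head_page() are taken from the driver
and the diff above. On an XDP_REDIRECT failure, or an XDP_TX frame that was not
queued, the page backing xdp->data is handed straight back to the RX queue's
page_pool instead of going through the generic xdp_return_buff() path:

/* Sketch only, not the full driver function. */
static u32
mvneta_run_xdp_sketch(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		      struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 ret, act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_REDIRECT: {
		int err = xdp_do_redirect(pp->dev, xdp, prog);

		if (err) {
			ret = MVNETA_XDP_DROPPED;
			/* return the page directly to this queue's pool */
			page_pool_recycle_direct(rxq->page_pool,
						 virt_to_head_page(xdp->data));
		} else {
			ret = MVNETA_XDP_REDIR;
		}
		break;
	}
	case XDP_TX:
		ret = mvneta_xdp_xmit_back(pp, xdp);
		if (ret != MVNETA_XDP_TX)
			/* transmit did not take the buffer: recycle it */
			page_pool_recycle_direct(rxq->page_pool,
						 virt_to_head_page(xdp->data));
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* simplified here: drop and recycle */
		page_pool_recycle_direct(rxq->page_pool,
					 virt_to_head_page(xdp->data));
		ret = MVNETA_XDP_DROPPED;
		break;
	}

	return ret;
}

Recycling directly into rxq->page_pool keeps the buffer in the pool's
lockless per-CPU cache, and it is what later allows the driver to sync only
the portion of the buffer the device actually wrote, which is the follow-up
this preliminary patch prepares for.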