net/mlx5e: xsk: dynamically allocate mlx5e_channel_param

Author:     Arnd Bergmann <arnd@arndb.de>
AuthorDate: Tue, 23 Jul 2019 12:02:26 +0000
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Tue, 23 Jul 2019 20:39:43 +0000 (13:39 -0700)

The structure is too large to put on the stack, resulting in a
warning on 32-bit ARM:

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c:59:5: error: stack frame size of 1344 bytes in function
      'mlx5e_open_xsk' [-Werror,-Wframe-larger-than=]

Use kvzalloc() instead.

Fixes: a038e9794541 ("net/mlx5e: Add XSK zero-copy support")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
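
For context, kvzalloc() returns zeroed memory, trying kmalloc() first and falling back to vmalloc() when a physically contiguous allocation is not practical, and kvfree() releases memory obtained either way. Below is a minimal sketch of the same on-stack-to-heap conversion; struct big_params, open_thing() and do_setup() are hypothetical stand-ins, not the actual mlx5e code, which follows in the patch.

    /* Sketch only: hypothetical names, not the mlx5e driver code. */
    #include <linux/mm.h>                  /* kvzalloc(), kvfree() */

    struct big_params {
            unsigned char blob[1344];      /* too big for a 32-bit ARM stack frame */
    };

    int do_setup(struct big_params *p);    /* hypothetical callee */

    static int open_thing(void)
    {
            struct big_params *p;
            int err;

            p = kvzalloc(sizeof(*p), GFP_KERNEL);  /* zeroed heap allocation */
            if (!p)
                    return -ENOMEM;

            err = do_setup(p);
            if (err)
                    goto err_free;

            kvfree(p);                     /* parameters only needed during setup */
            return 0;

    err_free:
            kvfree(p);
            return err;
    }
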
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index aaffa6f68dc01c161999896175ea21661be33d81..f701e4f3c0760cc2fa33557483b15e212e8d4603 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -60,24 +60,28 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
                   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
                   struct mlx5e_channel *c)
 {
-       struct mlx5e_channel_param cparam = {};
+       struct mlx5e_channel_param *cparam;
        struct dim_cq_moder icocq_moder = {};
        int err;
 
        if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
                return -EINVAL;
 
-       mlx5e_build_xsk_cparam(priv, params, xsk, &cparam);
+       cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+       if (!cparam)
+               return -ENOMEM;
 
-       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq);
+       mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
+
+       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq);
        if (unlikely(err))
-               return err;
+               goto err_free_cparam;
 
-       err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq);
+       err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
        if (unlikely(err))
                goto err_close_rx_cq;
 
-       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq);
+       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq);
        if (unlikely(err))
                goto err_close_rq;
 
@@ -87,21 +91,23 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
         * is disabled and then reenabled, but the SQ continues receiving CQEs
         * from the old UMEM.
         */
-       err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true);
+       err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
        if (unlikely(err))
                goto err_close_tx_cq;
 
-       err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq);
+       err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq);
        if (unlikely(err))
                goto err_close_sq;
 
        /* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
         * triggered and NAPI to be called on the correct CPU.
         */
-       err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq);
+       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq);
        if (unlikely(err))
                goto err_close_icocq;
 
+       kvfree(cparam);
+
        spin_lock_init(&c->xskicosq_lock);
 
        set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
@@ -123,6 +129,9 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
 err_close_rx_cq:
        mlx5e_close_cq(&c->xskrq.cq);
 
+err_free_cparam:
+       kvfree(cparam);
+
        return err;
 }
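
Note that the patch frees cparam in two places: on the success path immediately after the last consumer, mlx5e_open_icosq(), and on every failure path after the allocation via the new err_free_cparam label, which the existing unwind labels fall through to. This is consistent with the channel parameters only being needed while the CQs and queues are created, so the allocation is not kept around for the lifetime of the channel.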