/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 Mellanox Technologies. */
#include <net/devlink.h>

#include "reporter.h"
#include "lib/eq.h"
/* Upper bound on the error string built for a per-SQ health report. */
#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256

/* Context handed to devlink_health_report() for a single-SQ error:
 * the affected SQ and the recovery routine to invoke for it.
 */
struct mlx5e_tx_err_ctx {
	int (*recover)(struct mlx5e_txqsq *sq);
	struct mlx5e_txqsq *sq;
};
15 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
17 unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
19 while (time_before(jiffies, exp_time)) {
26 netdev_err(sq->channel->netdev,
27 "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
28 sq->sqn, sq->cc, sq->pc);
33 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
35 WARN_ONCE(sq->cc != sq->pc,
36 "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
37 sq->sqn, sq->cc, sq->pc);
43 static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
45 struct mlx5_core_dev *mdev = sq->channel->mdev;
46 struct net_device *dev = sq->channel->netdev;
47 struct mlx5e_modify_sq_param msp = {0};
50 msp.curr_state = curr_state;
51 msp.next_state = MLX5_SQC_STATE_RST;
53 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
55 netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
59 memset(&msp, 0, sizeof(msp));
60 msp.curr_state = MLX5_SQC_STATE_RST;
61 msp.next_state = MLX5_SQC_STATE_RDY;
63 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
65 netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
72 static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
74 struct mlx5_core_dev *mdev = sq->channel->mdev;
75 struct net_device *dev = sq->channel->netdev;
79 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
81 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
86 if (state != MLX5_SQC_STATE_ERR)
89 mlx5e_tx_disable_queue(sq->txq);
91 err = mlx5e_wait_for_sq_flush(sq);
95 /* At this point, no new packets will arrive from the stack as TXQ is
96 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
97 * pending WQEs. SQ can safely reset the SQ.
100 err = mlx5e_sq_to_ready(sq, state);
104 mlx5e_reset_txqsq_cc_pc(sq);
105 sq->stats->recover++;
106 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
107 mlx5e_activate_txqsq(sq);
111 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
115 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
117 struct mlx5e_tx_err_ctx *err_ctx)
119 if (IS_ERR_OR_NULL(tx_reporter)) {
120 netdev_err(err_ctx->sq->channel->netdev, err_str);
121 return err_ctx->recover(err_ctx->sq);
124 return devlink_health_report(tx_reporter, err_str, err_ctx);
127 void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
129 char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
130 struct mlx5e_tx_err_ctx err_ctx = {0};
133 err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
134 sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
136 mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
140 static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
142 struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
145 netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
146 eq->core.eqn, eq->core.cons_index, eq->core.irqn);
148 eqe_count = mlx5_eq_poll_irq_disabled(eq);
150 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
154 netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
155 eqe_count, eq->core.eqn);
156 sq->channel->stats->eq_rearm++;
160 int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
162 char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
163 struct mlx5e_tx_err_ctx err_ctx;
166 err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
168 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
169 sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
170 jiffies_to_usecs(jiffies - sq->txq->trans_start));
172 return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
176 /* state lock cannot be grabbed within this function.
177 * It can cause a dead lock or a read-after-free.
179 static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
181 return err_ctx->recover(err_ctx->sq);
184 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
189 mutex_lock(&priv->state_lock);
191 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
194 err = mlx5e_safe_reopen_channels(priv);
197 mutex_unlock(&priv->state_lock);
/* devlink health .recover callback: with a per-SQ context recover just
 * that SQ, otherwise recover the whole device.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_tx_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) :
			 mlx5e_tx_reporter_recover_all(priv);
}
214 mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
215 u32 sqn, u8 state, bool stopped)
219 err = devlink_fmsg_obj_nest_start(fmsg);
223 err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn);
227 err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
231 err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
235 err = devlink_fmsg_obj_nest_end(fmsg);
242 static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
243 struct devlink_fmsg *fmsg)
245 struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
248 mutex_lock(&priv->state_lock);
250 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
253 err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
257 for (i = 0; i < priv->channels.num * priv->channels.params.num_tc;
259 struct mlx5e_txqsq *sq = priv->txq2sq[i];
262 err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
266 err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
268 netif_xmit_stopped(sq->txq));
272 err = devlink_fmsg_arr_pair_nest_end(fmsg);
277 mutex_unlock(&priv->state_lock);
281 static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
283 .recover = mlx5e_tx_reporter_recover,
284 .diagnose = mlx5e_tx_reporter_diagnose,
287 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
289 int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
291 struct mlx5_core_dev *mdev = priv->mdev;
292 struct devlink *devlink = priv_to_devlink(mdev);
295 devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
296 MLX5_REPORTER_TX_GRACEFUL_PERIOD,
298 if (IS_ERR(priv->tx_reporter))
299 netdev_warn(priv->netdev,
300 "Failed to create tx reporter, err = %ld\n",
301 PTR_ERR(priv->tx_reporter));
302 return IS_ERR_OR_NULL(priv->tx_reporter);
305 void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
307 if (IS_ERR_OR_NULL(priv->tx_reporter))
310 devlink_health_reporter_destroy(priv->tx_reporter);