author      Shay Drory <shayd@nvidia.com>          2020-11-23 08:39:10 +0200
committer   Saeed Mahameed <saeedm@nvidia.com>     2021-02-11 18:50:11 -0800
commit      51d138c2610a236c1ed0059d034ee4c74f452b86 (patch)
tree        27c1a8285e29e88d74b84c98475b1bfb1f4518f2 /drivers/net/ethernet/mellanox/mlx5/core/health.c
parent      65ba8594a238c20e458b3d2d39d91067cbffd0b1 (diff)
net/mlx5: Fix health error state handling
Currently, when we discover a fatal error, we queue a work item that has to wait for a lock before it can move the device into the error state. Meanwhile, FW commands are still being processed and get timeouts. This can block the driver for a few minutes before the work manages to take the lock and enter the error state.

Set the device to the error state before queueing the health work, so that FW commands are not processed while the work is still waiting for the lock.

Fixes: c1d4d2e92ad6 ("net/mlx5: Avoid calling sleeping function by the health poll thread")
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
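For readability, the fatal path of poll_health() after this patch can be condensed as follows; this is only an annotated excerpt of the last hunk below, with comments restating the ordering described above, not new code:

	/* poll_health() fatal path after this patch (annotated excerpt) */
	mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
	dev->priv.health.fatal_error = fatal_error;
	print_health_info(dev);
	/* Move the device to the error state here, in the poll timer itself,
	 * before queueing the health work: any FW command issued from now on
	 * fails fast instead of timing out while the work is still waiting
	 * for the lock.
	 */
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mlx5_trigger_health_work(dev);
	return;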
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/health.c')
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/health.c   22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 54523bed16cd..0c32c485eb58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -190,6 +190,16 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
return true;
}
+static void enter_error_state(struct mlx5_core_dev *dev, bool force)
+{
+ if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mlx5_cmd_flush(dev);
+ }
+
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+}
+
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
bool err_detected = false;
@@ -208,12 +218,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
goto unlock;
}
- if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
- dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
- mlx5_cmd_flush(dev);
- }
-
- mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+ enter_error_state(dev, force);
unlock:
mutex_unlock(&dev->intf_state_mutex);
}
@@ -613,7 +618,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
- mlx5_enter_error_state(dev, false);
+ enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
@@ -707,8 +712,9 @@ static void poll_health(struct timer_list *t)
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
dev->priv.health.fatal_error = fatal_error;
print_health_info(dev);
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mlx5_trigger_health_work(dev);
- goto out;
+ return;
}
count = ioread32be(health->health_counter);