author     Eran Ben Elisha <eranbe@mellanox.com>     2020-03-18 21:44:32 +0200
committer  Saeed Mahameed <saeedm@mellanox.com>      2020-05-22 17:28:36 -0700
commit     d43b7007dbd1195a5b6b83213e49b1516aaf6f5e
tree       2de29011ba9f66e3883e37253b06e87baaf23a34 /include/linux/mlx5
parent     17d00e839d3b592da9659c1977d45f85b77f986a
net/mlx5: Fix a race when moving command interface to events mode
After the driver creates (via a FW command) an EQ for commands, it is informed of new command completions by EQE. However, due to a race in the driver's internal command mode metadata update, some new commands are still mishandled by the driver as if it were in polling mode. Such commands can get two non-forced completions, leading to access of an already freed command entry.

The CREATE_EQ command, which maps the EQ to the command queue, must be posted to the command queue while it is empty and no other command should be posted.

Add a SW mechanism so that once the CREATE_EQ command is about to be executed, all other commands return an error without being sent to the FW. Allow sending other commands only after successfully changing the driver's internal command mode metadata.

We can safely return an error to all other commands while creating the command EQ, as any other command at this point would be sent from the user/application during driver load; the application can rerun it later, after the driver's load has finished.

Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
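For illustration, a minimal sketch of the submission-path gate this mechanism implies. The helper name opcode_allowed() and the -ENXIO return value are illustrative assumptions here; only allowed_opcode, CMD_ALLOWED_OPCODE_ALL and mlx5_cmd_allowed_opcode() appear in the header diff below.

    static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
    {
            /* CMD_ALLOWED_OPCODE_ALL means no gate is currently in effect. */
            if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
                    return true;

            /* Otherwise only the single allowed opcode may reach the FW. */
            return cmd->allowed_opcode == opcode;
    }

    /* In the command submission path, anything else would bounce back with
     * an error that the caller can retry after driver load completes, e.g.:
     *
     *      if (!opcode_allowed(&dev->cmd, opcode))
     *              return -ENXIO;
     */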
Diffstat (limited to 'include/linux/mlx5')
-rw-r--r--  include/linux/mlx5/driver.h  6
1 file changed, 6 insertions, 0 deletions
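The header diff below only declares the hook; the caller-side sequencing described in the commit message lives under drivers/net/ethernet/mellanox/mlx5/core/, outside this diffstat, and is expected to look roughly like the following sketch. create_command_eq() is a hypothetical placeholder for the FW command that creates the command EQ; MLX5_CMD_OP_CREATE_EQ is the existing FW opcode and mlx5_cmd_use_events() is the existing mode switch.

    static int setup_cmd_eq_sketch(struct mlx5_core_dev *dev)
    {
            int err;

            /* Only CREATE_EQ may be posted while the command EQ is set up. */
            mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);

            err = create_command_eq(dev);   /* hypothetical FW-command helper */
            if (err)
                    goto out;

            /* Switch command completions from polling to EQE-driven mode... */
            mlx5_cmd_use_events(dev);
    out:
            /* ...and only then let all other opcodes through again. */
            mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
            return err;
    }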
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9b1f29f26c27..c03778c75dfa 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -284,6 +284,7 @@ struct mlx5_cmd {
struct semaphore sem;
struct semaphore pages_sem;
int mode;
+ u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct dma_pool *pool;
struct mlx5_cmd_debug dbg;
@@ -875,10 +876,15 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
+enum {
+ CMD_ALLOWED_OPCODE_ALL,
+};
+
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
struct mlx5_async_ctx {
struct mlx5_core_dev *dev;