author		Maxim Mikityanskiy <maximmi@nvidia.com>		2021-05-24 16:15:22 +0300
committer	Saeed Mahameed <saeedm@nvidia.com>		2021-08-09 20:56:43 -0700
commit		8ba3e4c85825c8801a2c298dcadac650a40d7137 (patch)
tree		1c4aca309e98b998c4adff1a4daba5f26c3c2d09 /drivers
parent		6d8680da2e98410a25fe49e0a53f28c004be6d6d (diff)
net/mlx5e: Destroy page pool after XDP SQ to fix use-after-free
mlx5e_close_xdpsq does the cleanup: it calls mlx5e_free_xdpsq_descs to free the outstanding descriptors, which relies on mlx5e_page_release_dynamic and page_pool_release_page. However, page_pool_destroy is already called by this point, because mlx5e_close_rq runs before mlx5e_close_xdpsq.

This commit fixes the use-after-free by swapping mlx5e_close_xdpsq and mlx5e_close_rq.

The commit cited below started calling page_pool_destroy directly from the driver. Previously, the page pool was destroyed under a call_rcu from xdp_rxq_info_unreg_mem_model, which would defer the deallocation until after the XDPSQ is cleaned up.

Fixes: 1da4bbeffe41 ("net: core: page_pool: add user refcnt and reintroduce page_pool_destroy")
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
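The ordering problem is easiest to see in isolation. Below is a minimal user-space sketch of the same pattern; page_pool_stub, xdpsq_stub, close_xdpsq and close_rq are hypothetical stand-ins, not the driver's actual structures or functions. It illustrates the point made above: the XDP SQ's outstanding descriptors still point into the RQ's page pool, so the pool may only be destroyed after the SQ cleanup has released them.

#include <stdio.h>
#include <stdlib.h>

struct page_pool_stub {                 /* stand-in for struct page_pool */
        char *pages;
};

struct xdpsq_stub {                     /* stand-in for the XDP SQ */
        struct page_pool_stub *pool;    /* outstanding descriptors reference the pool */
        int outstanding;
};

/* analogous to mlx5e_free_xdpsq_descs() ending in page_pool_release_page() */
static void close_xdpsq(struct xdpsq_stub *sq)
{
        while (sq->outstanding--)
                printf("releasing a page back to pool buffer %p\n",
                       (void *)sq->pool->pages);   /* use-after-free if the pool is gone */
}

/* analogous to mlx5e_close_rq() ending in page_pool_destroy() */
static void close_rq(struct page_pool_stub *pool)
{
        free(pool->pages);
        free(pool);
}

int main(void)
{
        struct page_pool_stub *pool = malloc(sizeof(*pool));
        struct xdpsq_stub sq = { .pool = pool, .outstanding = 2 };

        pool->pages = malloc(4096);

        /* buggy order (before the patch): close_rq(pool); close_xdpsq(&sq); */
        close_xdpsq(&sq);       /* fixed order: release outstanding descriptors first, */
        close_rq(pool);         /* then destroy the pool they point into */
        return 0;
}

Swapping the two calls in main() back to the commented-out order reproduces the same class of use-after-free that this patch removes from the channel close path.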
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 37c440837945..fd250f7bcd88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1891,30 +1891,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 	if (err)
 		goto err_close_icosq;
 
+	err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+	if (err)
+		goto err_close_sqs;
+
 	if (c->xdp) {
 		err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
 				       &c->rq_xdpsq, false);
 		if (err)
-			goto err_close_sqs;
+			goto err_close_rq;
 	}
 
-	err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
-	if (err)
-		goto err_close_xdp_sq;
-
 	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
 	if (err)
-		goto err_close_rq;
+		goto err_close_xdp_sq;
 
 	return 0;
 
-err_close_rq:
-	mlx5e_close_rq(&c->rq);
-
 err_close_xdp_sq:
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
 
+err_close_rq:
+	mlx5e_close_rq(&c->rq);
+
 err_close_sqs:
 	mlx5e_close_sqs(c);
 
@@ -1949,9 +1949,9 @@ err_close_async_icosq_cq:
 static void mlx5e_close_queues(struct mlx5e_channel *c)
 {
 	mlx5e_close_xdpsq(&c->xdpsq);
-	mlx5e_close_rq(&c->rq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
+	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
 	mlx5e_close_icosq(&c->icosq);
 	mlx5e_close_icosq(&c->async_icosq);
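
For reference, this is how the teardown path of mlx5e_close_queues reads with the patch applied, assembled from the hunk above; only the lines visible in the hunk are shown, the rest of the function is unchanged and elided.

static void mlx5e_close_queues(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);
	/* destroys the RQ's page pool, now only after both XDP SQs are cleaned up */
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	mlx5e_close_icosq(&c->async_icosq);
	/* ... remaining teardown elided ... */
}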