author	Jason Gunthorpe <jgg@nvidia.com>	2020-07-19 09:54:35 +0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-07-21 13:51:35 -0300
commit	a862192e9227ad46e0447fd0a03869ba1b30d16f (patch)
tree	d65d0f17788a568af2eea1b7664087ab7367af85 /drivers/infiniband
parent	87c4c774cbef5c68b3df96827c2fb07f1aa80152 (diff)
RDMA/mlx5: Prevent prefetch from racing with implicit destruction
Prefetch work in mlx5_ib_prefetch_mr_work can be queued and able to run
concurrently with destruction of the implicit MR. The num_deferred_work
was intended to serialize this, but there is a race:

     CPU0                                          CPU1

  mlx5_ib_free_implicit_mr()
    xa_erase(odp_mkeys)
    synchronize_srcu()
    __xa_erase(implicit_children)
                                      mlx5_ib_prefetch_mr_work()
                                        pagefault_mr()
                                         pagefault_implicit_mr()
                                          implicit_get_child_mr()
                                           xa_cmpxchg()
                                        atomic_dec_and_test(num_deferred_mr)
    wait_event(imr->q_deferred_work)
    ib_umem_odp_release(odp_imr)
    kfree(odp_imr)

At this point in mlx5_ib_free_implicit_mr() the implicit_children list is
supposed to be empty forever so that destroy_unused_implicit_child_mr()
and related are not and will not be running.

Since it is not empty the destroy_unused_implicit_child_mr() flow ends up
touching deallocated memory as mlx5_ib_free_implicit_mr() already tore
down the imr parent.

The solution is to flush out the prefetch wq by driving num_deferred_work
to zero after creation of new prefetch work is blocked.

Fixes: 5256edcb98a1 ("RDMA/mlx5: Rework implicit ODP destroy")
Link: https://lore.kernel.org/r/20200719065435.130722-1-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
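The fix follows a common teardown pattern: first make it impossible for new deferred work to be created, then drive the in-flight counter to zero before freeing the parent. Below is a minimal, hypothetical sketch of that pattern using the kernel's atomic and waitqueue primitives. The toy_* names are illustrative only, and a simple "destroying" flag under a spinlock stands in for the driver's actual xa_erase()/synchronize_srcu() sequence that blocks new prefetch work; this is not the mlx5 code itself.

/*
 * Sketch only: "block new work, then drain" teardown.  Not mlx5 code.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct toy_imr {
	atomic_t num_deferred_work;	/* in-flight deferred work items */
	wait_queue_head_t q_deferred_work;
	spinlock_t lock;
	bool destroying;		/* true once teardown has begun */
};

struct toy_prefetch_work {
	struct work_struct work;
	struct toy_imr *imr;
};

/* Work handler: do the deferred work, drop the reference, wake the destroyer. */
static void toy_prefetch_work_func(struct work_struct *work)
{
	struct toy_prefetch_work *pw =
		container_of(work, struct toy_prefetch_work, work);
	struct toy_imr *imr = pw->imr;

	/* ... perform the prefetch against imr ... */
	kfree(pw);
	if (atomic_dec_and_test(&imr->num_deferred_work))
		wake_up(&imr->q_deferred_work);
}

/* Producer: take a deferred-work reference only while creation is allowed. */
static bool toy_queue_prefetch(struct toy_imr *imr)
{
	struct toy_prefetch_work *pw;

	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
	if (!pw)
		return false;

	spin_lock(&imr->lock);
	if (imr->destroying) {
		spin_unlock(&imr->lock);
		kfree(pw);
		return false;			/* teardown already started */
	}
	atomic_inc(&imr->num_deferred_work);	/* pin imr against teardown */
	spin_unlock(&imr->lock);

	pw->imr = imr;
	INIT_WORK(&pw->work, toy_prefetch_work_func);
	schedule_work(&pw->work);
	return true;
}

/* Destroyer: block new work, drain what is already in flight, then free. */
static void toy_free_imr(struct toy_imr *imr)
{
	spin_lock(&imr->lock);
	imr->destroying = true;			/* no new increments after this */
	spin_unlock(&imr->lock);

	wait_event(imr->q_deferred_work,
		   !atomic_read(&imr->num_deferred_work));
	kfree(imr);
}

The ordering is the whole point: the destroyer blocks creation before it waits, and the producer takes its reference before the work can run, so once teardown has begun the counter can only decrease and the wait is guaranteed to finish.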
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c  22
1 file changed, 19 insertions, 3 deletions
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 7d2ec9ee5097..1ab676b66894 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
+ /*
+ * All work on the prefetch list must be completed, xa_erase() prevented
+ * new work from being created.
+ */
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+ /*
+ * At this point it is forbidden for any other thread to enter
+ * pagefault_mr() on this imr. It is already forbidden to call
+ * pagefault_mr() on an implicit child. Due to this additions to
+ * implicit_children are prevented.
+ */
+
+ /*
+ * Block destroy_unused_implicit_child_mr() from incrementing
+ * num_deferred_work.
+ */
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
- * num_deferred_work can only be incremented inside the odp_srcu, or
- * under xa_lock while the child is in the xarray. Thus at this point
- * it is only decreasing, and all work holding it is now on the wq.
+ * Wait for any concurrent destroy_unused_implicit_child_mr() to
+ * complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
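
For the two wait_event() calls above to be guaranteed to terminate, the invariant spelled out in the removed comment has to hold: num_deferred_work is only incremented inside an odp_srcu read-side section, or under xa_lock while a child is still present in implicit_children. Below is a hypothetical sketch of such an SRCU-guarded lookup path, reusing the toy_imr from the earlier sketch; toy_dev and its field names are illustrative, not the actual mlx5 structures.

#include <linux/srcu.h>
#include <linux/xarray.h>

struct toy_dev {
	struct srcu_struct odp_srcu;
	struct xarray odp_mkeys;	/* key -> struct toy_imr * */
};

/*
 * Lookup path that pins an MR for deferred work.  The counter is only
 * incremented while holding the SRCU read lock, and only for an MR that
 * is still visible in odp_mkeys, so xa_erase() + synchronize_srcu() on
 * the destroy side guarantees no further increments.
 */
static struct toy_imr *toy_get_prefetchable(struct toy_dev *dev,
					    unsigned long key)
{
	struct toy_imr *imr;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&dev->odp_srcu);
	imr = xa_load(&dev->odp_mkeys, key);
	if (imr)
		atomic_inc(&imr->num_deferred_work);
	srcu_read_unlock(&dev->odp_srcu, srcu_idx);

	return imr;
}

With lookups gated this way, once the destroyer has erased the mkeys, run synchronize_srcu(), and emptied implicit_children under xa_lock, the counter can only decrease, which is exactly what the flush added by this commit relies on.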