path: root/drivers/infiniband/sw/rxe/rxe_recv.c
author:    Bob Pearson <rpearsonhpe@gmail.com>  2023-04-04 23:26:11 -0500
committer: Jason Gunthorpe <jgg@nvidia.com>     2023-04-17 16:34:04 -0300
commit:    f605f26ea196a3b49bea249330cbd18dba61a33e (patch)
tree:      fc7bb3a98ec7549d60e7be5ae6db058ad219bb4a /drivers/infiniband/sw/rxe/rxe_recv.c
parent:    7b560b89a08d35c23dfc95dc44aee10651c8b9a0 (diff)
RDMA/rxe: Protect QP state with qp->state_lock
Currently the rxe driver makes little effort to make the changes to qp state (which includes qp->attr.qp_state, qp->attr.sq_draining and qp->valid) atomic between different client threads and IO threads.

In particular, a common template is for an RDMA application to call ib_modify_qp() to move a qp to ERR state and then wait until all the packet and work queues have drained before calling ib_destroy_qp(). None of these state changes are protected by locks to assure that the changes are executed atomically and that memory barriers are included. This has been observed to lead to incorrect behavior around qp cleanup.

This patch continues the work of the previous patches in this series and adds locking code around qp state changes and lookups.

Link: https://lore.kernel.org/r/20230405042611.6467-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
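For context, the application-side template the commit message describes looks roughly like the sketch below. It uses the libibverbs userspace API (ibv_modify_qp()/ibv_destroy_qp()); the helper name drain_and_destroy_qp and the CQ-polling drain loop are illustrative simplifications, not code from this series.

	/*
	 * Rough userspace sketch of the teardown template described above.
	 * The drain step is simplified; real applications typically wait for
	 * every outstanding WR to complete with a flush error before destroy.
	 */
	#include <infiniband/verbs.h>
	#include <string.h>

	static int drain_and_destroy_qp(struct ibv_qp *qp, struct ibv_cq *cq)
	{
		struct ibv_qp_attr attr;
		struct ibv_wc wc;
		int ret;

		/* Move the QP to ERR so queued WRs complete with flush errors. */
		memset(&attr, 0, sizeof(attr));
		attr.qp_state = IBV_QPS_ERR;
		ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE);
		if (ret)
			return ret;

		/* Wait until the packet and work queues have drained (simplified). */
		while (ibv_poll_cq(cq, 1, &wc) > 0)
			;

		/* Only now is it safe to destroy the QP. */
		return ibv_destroy_qp(qp);
	}

It is this window between the state change to ERR and the final destroy that the qp->state_lock changes below are meant to make safe on the driver side.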
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_recv.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c | 10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index ca17ac6c5878..2f953cc74256 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -38,13 +38,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 		return -EINVAL;
 	}
 
+	spin_lock_bh(&qp->state_lock);
 	if (pkt->mask & RXE_REQ_MASK) {
-		if (unlikely(qp_state(qp) < IB_QPS_RTR))
+		if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
+			spin_unlock_bh(&qp->state_lock);
 			return -EINVAL;
+		}
 	} else {
-		if (unlikely(qp_state(qp) < IB_QPS_RTS))
+		if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
+			spin_unlock_bh(&qp->state_lock);
 			return -EINVAL;
+		}
 	}
+	spin_unlock_bh(&qp->state_lock);
 
 	return 0;
 }
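The hunk above is the reader side of the protection: the state is only sampled while qp->state_lock is held. It only helps if every path that changes the state takes the same lock. A hypothetical writer-side counterpart is sketched below; the function name example_set_qp_error is illustrative and not part of this patch, while qp->state_lock and qp->attr.qp_state are the fields named in the commit message.

	/*
	 * Hypothetical writer-side sketch (illustrative only): any path that
	 * changes the QP state must take the same lock so readers such as
	 * check_type_state() never observe a half-updated state.
	 */
	static void example_set_qp_error(struct rxe_qp *qp)
	{
		spin_lock_bh(&qp->state_lock);
		qp->attr.qp_state = IB_QPS_ERR;
		spin_unlock_bh(&qp->state_lock);
	}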