author	Bob Pearson <rpearsonhpe@gmail.com>	2022-02-15 13:44:49 -0600
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-02-16 11:51:28 -0400
commit	a099b08599e6ae6b8e9faccee83760dab622c11e
tree	7eb8cfe2ef11fc51cb2fe174422c8a6be06ac34d /drivers/infiniband/sw/rxe/rxe_cq.c
parent	3c8bc3954d771fc4889508ad5d16f084adc4d64d
RDMA/rxe: Revert changes from irqsave to bh locks
A previous patch replaced all irqsave locks in rxe with bh locks. This ran into problems because rdmacm has a bad habit of calling rdma verbs APIs while interrupts are disabled. Calling spin_unlock_bh() in that context is not allowed, causing programs that use rdmacm to fail. This patch reverts the changes to the locks that had this problem or were dragged into the same mess. After this patch, blktests/check -q srp runs correctly.

Link: https://lore.kernel.org/r/20220215194448.44369-1-rpearsonhpe@gmail.com
Fixes: 21adfa7a3c4e ("RDMA/rxe: Replace irqsave locks with bh locks")
Reported-by: Guoqing Jiang <guoqing.jiang@linux.dev>
Reported-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_cq.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_cq.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
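For context before the diff, a minimal sketch of the failing pattern the commit message describes. It is illustrative only: caller_lock and cq_post_with_irqs_off() are hypothetical and not taken from the rdmacm source. A caller that disables interrupts through its own irqsave lock and then enters a verbs path reaching rxe_cq_post() would, with the bh locks this patch reverts, hit a spin_unlock_bh() while interrupts are still disabled; the restored irqsave locks instead save and restore the caller's interrupt state.

#include <linux/spinlock.h>
#include "rxe.h"	/* struct rxe_cq, struct rxe_cqe, rxe_cq_post() */

/* Hypothetical caller-side lock, for illustration only. */
static DEFINE_SPINLOCK(caller_lock);

static void cq_post_with_irqs_off(struct rxe_cq *cq, struct rxe_cqe *cqe)
{
	unsigned long flags;

	spin_lock_irqsave(&caller_lock, flags);	/* interrupts now disabled */

	/*
	 * With the bh locks that this patch reverts, rxe_cq_post() would
	 * call spin_unlock_bh(), re-enabling bottom halves while interrupts
	 * are still off, which is not allowed.  With the restored irqsave
	 * locks, rxe_cq_post() saves and restores the interrupt state
	 * itself, so this call is safe.
	 */
	rxe_cq_post(cq, cqe, 0);

	spin_unlock_irqrestore(&caller_lock, flags);
}

The irqsave variants are correct regardless of the caller's interrupt state, which is why the revert restores them for the locks reachable from rdmacm call paths.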
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 6baaaa34458e..642b52539ac3 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -42,13 +42,14 @@ err1:
 static void rxe_send_complete(struct tasklet_struct *t)
 {
 	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
+	unsigned long flags;
 
-	spin_lock_bh(&cq->cq_lock);
+	spin_lock_irqsave(&cq->cq_lock, flags);
 	if (cq->is_dying) {
-		spin_unlock_bh(&cq->cq_lock);
+		spin_unlock_irqrestore(&cq->cq_lock, flags);
 		return;
 	}
-	spin_unlock_bh(&cq->cq_lock);
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
@@ -107,12 +108,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 	struct ib_event ev;
 	int full;
 	void *addr;
+	unsigned long flags;
 
-	spin_lock_bh(&cq->cq_lock);
+	spin_lock_irqsave(&cq->cq_lock, flags);
 
 	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
 	if (unlikely(full)) {
-		spin_unlock_bh(&cq->cq_lock);
+		spin_unlock_irqrestore(&cq->cq_lock, flags);
 		if (cq->ibcq.event_handler) {
 			ev.device = cq->ibcq.device;
 			ev.element.cq = &cq->ibcq;
@@ -128,7 +130,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 
 	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
 
-	spin_unlock_bh(&cq->cq_lock);
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
 	if ((cq->notify == IB_CQ_NEXT_COMP) ||
 	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -141,9 +143,11 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 
 void rxe_cq_disable(struct rxe_cq *cq)
 {
-	spin_lock_bh(&cq->cq_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
 	cq->is_dying = true;
-	spin_unlock_bh(&cq->cq_lock);
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
 }
 
 void rxe_cq_cleanup(struct rxe_pool_elem *elem)