path: root/drivers/infiniband/hw/irdma/verbs.c
author    Mustafa Ismail <mustafa.ismail@intel.com>    2022-04-25 13:16:24 -0500
committer Jason Gunthorpe <jgg@nvidia.com>             2022-05-11 15:58:40 -0300
commit    81091d7696ae71627ff80bbf2c6b0986d2c1cce3 (patch)
tree      e949b113ef7aa37039994a3242c57576709a8712 /drivers/infiniband/hw/irdma/verbs.c
parent    a2d36b02c15d49cec1f5a44c5123bb7f3bdfea8e (diff)
RDMA/irdma: Add SW mechanism to generate completions on error
HW flushes after the QP moves to the error state are not reliable, which can leave an application hung waiting on completions for outstanding WRs. Implement a SW mechanism to generate completions for any outstanding WRs after the QP is modified to error.

This is accomplished by starting a delayed worker after the QP is modified to error and the HW flush is performed. The worker generates completions that are returned to the application when it polls the CQ. This mechanism only applies to kernel applications.

Link: https://lore.kernel.org/r/20220425181624.1617-1-shiraz.saleem@intel.com
Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
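As a minimal sketch (not the driver code itself), the delayed-work pattern the patch builds on looks like the following. demo_qp, demo_flush_worker, generate_sw_completions and FLUSH_DELAY_MS are illustrative stand-ins for the irdma names (iwqp->dwork_flush, irdma_flush_worker, irdma_generate_flush_completions, IRDMA_FLUSH_DELAY_MS); the workqueue calls themselves (INIT_DELAYED_WORK, mod_delayed_work, cancel_delayed_work_sync) are the standard <linux/workqueue.h> API, with system_wq assumed in place of the driver's private cleanup_wq.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define FLUSH_DELAY_MS 500			/* stand-in for IRDMA_FLUSH_DELAY_MS */

struct demo_qp {
	struct delayed_work dwork_flush;	/* per-QP SW flush worker */
	bool flush_issued;			/* HW flush has been posted */
};

static void generate_sw_completions(struct demo_qp *qp)
{
	/* In the patch: irdma_generate_flush_completions(iwqp). */
}

static void demo_flush_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_qp *qp = container_of(dwork, struct demo_qp, dwork_flush);

	generate_sw_completions(qp);
}

static void demo_create_qp(struct demo_qp *qp)
{
	/* Kernel QPs only; user-mode QPs never arm the worker. */
	INIT_DELAYED_WORK(&qp->dwork_flush, demo_flush_worker);
}

static void demo_post_wr(struct demo_qp *qp)
{
	/* Posting to an already-flushed QP (re)arms the delayed worker. */
	if (qp->flush_issued)
		mod_delayed_work(system_wq, &qp->dwork_flush,
				 msecs_to_jiffies(FLUSH_DELAY_MS));
}

static void demo_destroy_qp(struct demo_qp *qp)
{
	/* Ensure the worker cannot run after the QP memory is freed. */
	cancel_delayed_work_sync(&qp->dwork_flush);
}

The delay gives the HW flush a window to produce CQEs on its own; only if it does not does the worker synthesize flush completions, which __irdma_poll_cq then returns to the application when irdma_poll_one finds no HW CQE (-ENOENT).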
Diffstat (limited to 'drivers/infiniband/hw/irdma/verbs.c')
-rw-r--r--	drivers/infiniband/hw/irdma/verbs.c	56
1 file changed, 33 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index f70ddf9b45bf..83ba18873dac 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -535,6 +535,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
irdma_modify_qp_to_err(&iwqp->sc_qp);
+ if (!iwqp->user_mode)
+ cancel_delayed_work_sync(&iwqp->dwork_flush);
+
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
@@ -790,6 +793,14 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
return 0;
}
+static void irdma_flush_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
+
+ irdma_generate_flush_completions(iwqp);
+}
+
/**
* irdma_create_qp - create qp
* @ibqp: ptr of qp
@@ -909,6 +920,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
irdma_setup_virt_qp(iwdev, iwqp, &init_info);
} else {
+ INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
}
@@ -1400,11 +1412,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (iwqp->ibqp_state > IB_QPS_RTS &&
!iwqp->flush_issued) {
- iwqp->flush_issued = 1;
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
IRDMA_FLUSH_RQ |
IRDMA_FLUSH_WAIT);
+ iwqp->flush_issued = 1;
} else {
spin_unlock_irqrestore(&iwqp->lock, flags);
}
@@ -1757,6 +1769,8 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
unsigned long flags;
spin_lock_irqsave(&iwcq->lock, flags);
+ if (!list_empty(&iwcq->cmpl_generated))
+ irdma_remove_cmpls_list(iwcq);
if (!list_empty(&iwcq->resize_list))
irdma_process_resize_list(iwcq, iwdev, NULL);
spin_unlock_irqrestore(&iwcq->lock, flags);
@@ -1961,6 +1975,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
cq->back_cq = iwcq;
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
@@ -3046,15 +3061,12 @@ static int irdma_post_send(struct ib_qp *ibqp,
unsigned long flags;
bool inv_stag;
struct irdma_ah *ah;
- bool reflush = false;
iwqp = to_iwqp(ibqp);
ukqp = &iwqp->sc_qp.qp_uk;
dev = &iwqp->iwdev->rf->sc_dev;
spin_lock_irqsave(&iwqp->lock, flags);
- if (iwqp->flush_issued && ukqp->sq_flush_complete)
- reflush = true;
while (ib_wr) {
memset(&info, 0, sizeof(info));
inv_stag = false;
@@ -3204,15 +3216,14 @@ static int irdma_post_send(struct ib_qp *ibqp,
ib_wr = ib_wr->next;
}
- if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {
- irdma_uk_qp_post_wr(ukqp);
+ if (!iwqp->flush_issued) {
+ if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
+ irdma_uk_qp_post_wr(ukqp);
spin_unlock_irqrestore(&iwqp->lock, flags);
- } else if (reflush) {
- ukqp->sq_flush_complete = false;
- spin_unlock_irqrestore(&iwqp->lock, flags);
- irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
} else {
spin_unlock_irqrestore(&iwqp->lock, flags);
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
if (err)
*bad_wr = ib_wr;
@@ -3235,14 +3246,11 @@ static int irdma_post_recv(struct ib_qp *ibqp,
struct irdma_post_rq_info post_recv = {};
unsigned long flags;
int err = 0;
- bool reflush = false;
iwqp = to_iwqp(ibqp);
ukqp = &iwqp->sc_qp.qp_uk;
spin_lock_irqsave(&iwqp->lock, flags);
- if (iwqp->flush_issued && ukqp->rq_flush_complete)
- reflush = true;
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
@@ -3258,13 +3266,10 @@ static int irdma_post_recv(struct ib_qp *ibqp,
}
out:
- if (reflush) {
- ukqp->rq_flush_complete = false;
- spin_unlock_irqrestore(&iwqp->lock, flags);
- irdma_flush_wqes(iwqp, IRDMA_FLUSH_RQ | IRDMA_REFLUSH);
- } else {
- spin_unlock_irqrestore(&iwqp->lock, flags);
- }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (iwqp->flush_issued)
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
if (err)
*bad_wr = ib_wr;
@@ -3476,6 +3481,11 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
/* check the current CQ for new cqes */
while (npolled < num_entries) {
ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
+ if (ret == -ENOENT) {
+ ret = irdma_generated_cmpls(iwcq, cur_cqe);
+ if (!ret)
+ irdma_process_cqe(entry + npolled, cur_cqe);
+ }
if (!ret) {
++npolled;
cq_new_cqe = true;
@@ -3557,13 +3567,13 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
promo_event = true;
- if (!iwcq->armed || promo_event) {
- iwcq->armed = true;
+ if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
iwcq->last_notify = cq_notify;
irdma_uk_cq_request_notification(ukcq, cq_notify);
}
- if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
ret = 1;
spin_unlock_irqrestore(&iwcq->lock, flags);