author     Sindhu Devale <sindhu.devale@intel.com>    2023-07-25 10:55:24 -0500
committer  Leon Romanovsky <leon@kernel.org>          2023-07-30 15:43:00 +0300
commit     3a8498720450174b8db450d3375a04dca81b3534 (patch)
tree       57ec891ff8849482750eebcf0192df82fc86fb9d /drivers/infiniband/hw/irdma/verbs.c
parent     cb06b6b3f6cbc56c534587db2aac3e0958a4a314 (diff)
RDMA/irdma: Allow accurate reporting on QP max send/recv WR
Currently, the attributes cap.max_send_wr and cap.max_recv_wr sent from user-space during create QP are the provider-computed SQ/RQ depths, as opposed to the raw values passed from the application. This inhibits computation of an accurate value for max_send_wr and max_recv_wr for this QP in the kernel that matches the value returned in user create QP. These capabilities also need to be reported by the driver in query QP.

Add support by extending the ABI to allow the raw cap.max_send_wr and cap.max_recv_wr to be passed from user-space, while keeping compatibility with the older scheme. The internal HW depth and shift needed for the WQs now need to be computed for both kernel-mode and user-mode QPs. Add new helpers to assist with this: irdma_uk_calc_depth_shift_sq, irdma_uk_calc_depth_shift_rq and irdma_uk_calc_shift_wq.

Consolidate all the user-mode QP setup into a new function, irdma_setup_umode_qp, which keeps it alongside its counterpart, irdma_setup_kmode_qp.

Signed-off-by: Youvaraj Sagar <youvaraj.sagar@intel.com>
Signed-off-by: Sindhu Devale <sindhu.devale@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Link: https://lore.kernel.org/r/20230725155525.1081-2-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
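To illustrate the handshake this patch introduces, below is a minimal user-space sketch of the comp_mask negotiation. It is a sketch under stated assumptions: the struct layouts are abbreviated, the bit value of IRDMA_ALLOC_UCTX_USE_RAW_ATTR is assumed, and negotiate_raw_attrs() is a hypothetical helper; only the flag name and the req/uresp comp_mask fields come from the diff below.

#include <stdbool.h>
#include <stdint.h>

#define IRDMA_ALLOC_UCTX_USE_RAW_ATTR (1ULL << 0)	/* assumed bit position */

struct irdma_alloc_ucontext_req {	/* abbreviated */
	uint64_t comp_mask;
};

struct irdma_alloc_ucontext_resp {	/* abbreviated */
	uint64_t comp_mask;
};

/* Hypothetical helper: the provider sets the flag in the request; after
 * the alloc_ucontext call fills in the response, an echoed bit means the
 * kernel accepts raw cap.max_send_wr/cap.max_recv_wr at create QP. An
 * older kernel leaves the bit clear, so the provider must keep passing
 * SQ/RQ depth in quanta as before.
 */
static bool negotiate_raw_attrs(const struct irdma_alloc_ucontext_req *req,
				const struct irdma_alloc_ucontext_resp *resp)
{
	return (req->comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR) &&
	       (resp->comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR);
}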
Diffstat (limited to 'drivers/infiniband/hw/irdma/verbs.c')
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c  182
1 file changed, 110 insertions(+), 72 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index a1a42a7cd783..0187cff7b9c6 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -277,7 +277,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
- struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
@@ -292,7 +292,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
- uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+ if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
+ ucontext->use_raw_attrs = true;
+
/* GEN_1 legacy support with libi40iw */
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
@@ -327,6 +329,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -567,6 +570,86 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
}
/**
+ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
+ * @udata: user data
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_umode_qp(struct ib_udata *udata,
+ struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+ struct irdma_create_qp_req req;
+ unsigned long flags;
+ int ret;
+
+ ret = ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen));
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_udata fail\n");
+ return ret;
+ }
+
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ iwqp->user_mode = 1;
+ if (req.user_wqe_bufs) {
+ info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ ret = -ENODATA;
+ ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
+ return ret;
+ }
+ }
+
+ if (!ucontext->use_raw_attrs) {
+ /**
+ * Maintain backward compat with older ABI which passes sq and
+ * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
+ * There is no way to compute the correct value of
+ * iwqp->max_send_wr/max_recv_wr in the kernel.
+ */
+ iwqp->max_send_wr = init_attr->cap.max_send_wr;
+ iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
+ ukinfo->sq_size = init_attr->cap.max_send_wr;
+ ukinfo->rq_size = init_attr->cap.max_recv_wr;
+ irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
+ &ukinfo->rq_shift);
+ } else {
+ ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (ret)
+ return ret;
+
+ ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (ret)
+ return ret;
+
+ iwqp->max_send_wr =
+ (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr =
+ (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ }
+
+ irdma_setup_virt_qp(iwdev, iwqp, info);
+
+ return 0;
+}
+
+/**
* irdma_setup_kmode_qp - setup initialization for kernel mode qp
* @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
@@ -579,40 +662,28 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct ib_qp_init_attr *init_attr)
{
struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
- u32 sqdepth, rqdepth;
- u8 sqshift, rqshift;
u32 size;
int status;
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
- irdma_get_wqe_shift(uk_attrs,
- uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
- ukinfo->max_sq_frag_cnt,
- ukinfo->max_inline_data, &sqshift);
- status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
- &sqdepth);
+ status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
if (status)
return status;
- if (uk_attrs->hw_rev == IRDMA_GEN_1)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- else
- irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
- &rqshift);
-
- status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
- &rqdepth);
+ status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
if (status)
return status;
iwqp->kqp.sq_wrid_mem =
- kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
if (!iwqp->kqp.sq_wrid_mem)
return -ENOMEM;
iwqp->kqp.rq_wrid_mem =
- kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+
if (!iwqp->kqp.rq_wrid_mem) {
kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
@@ -622,7 +693,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
- size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
+ size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
size += (IRDMA_SHADOW_AREA_SIZE << 3);
mem->size = ALIGN(size, 256);
@@ -638,16 +709,19 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq = mem->va;
info->sq_pa = mem->pa;
- ukinfo->rq = &ukinfo->sq[sqdepth];
- info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
- info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->sq_size = sqdepth >> sqshift;
- ukinfo->rq_size = rqdepth >> rqshift;
+ ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
+ info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
+ info->shadow_area_pa =
+ info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
ukinfo->qp_id = iwqp->ibqp.qp_num;
- init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
- init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ init_attr->cap.max_send_wr = iwqp->max_send_wr;
+ init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
return 0;
}
@@ -803,18 +877,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp);
- struct irdma_create_qp_req req = {};
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
int err_code;
- int sq_size;
- int rq_size;
struct irdma_sc_qp *qp;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {};
struct irdma_qp_host_ctx_info *ctx_info;
- unsigned long flags;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
@@ -824,13 +894,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
return -EINVAL;
- sq_size = init_attr->cap.max_send_wr;
- rq_size = init_attr->cap.max_recv_wr;
-
init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
- init_info.qp_uk_init_info.sq_size = sq_size;
- init_info.qp_uk_init_info.rq_size = rq_size;
+ init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
+ init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
@@ -880,36 +947,9 @@ static int irdma_create_qp(struct ib_qp *ibqp,
init_waitqueue_head(&iwqp->mod_qp_waitq);
if (udata) {
- err_code = ib_copy_from_udata(&req, udata,
- min(sizeof(req), udata->inlen));
- if (err_code) {
- ibdev_dbg(&iwdev->ibdev,
- "VERBS: ib_copy_from_data fail\n");
- goto error;
- }
-
- iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
- iwqp->user_mode = 1;
- if (req.user_wqe_bufs) {
- struct irdma_ucontext *ucontext =
- rdma_udata_to_drv_context(udata,
- struct irdma_ucontext,
- ibucontext);
-
- init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
- &ucontext->qp_reg_mem_list);
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
- if (!iwqp->iwpbl) {
- err_code = -ENODATA;
- ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
- goto error;
- }
- }
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
- irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
+ init_attr);
} else {
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
@@ -964,8 +1004,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
rf->qp_table[qp_num] = iwqp;
- iwqp->max_send_wr = sq_size;
- iwqp->max_recv_wr = rq_size;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (dev->ws_add(&iwdev->vsi, 0)) {
@@ -986,8 +1024,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
uresp.lsmm = 1;
}
- uresp.actual_sq_size = sq_size;
- uresp.actual_rq_size = rq_size;
+ uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
+ uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
uresp.qp_id = qp_num;
uresp.qp_caps = qp->qp_uk.qp_caps;
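For reference, both setup paths above report the application-visible limits with the same expression, (depth - reserved) >> shift, where depth is counted in WQE quanta and shift converts quanta to whole WQEs. A standalone sketch of that math (irdma_report_max_wr is a hypothetical name; rsvd stands in for the driver's IRDMA_SQ_RSVD/IRDMA_RQ_RSVD constants):

#include <stdint.h>

/* Mirrors iwqp->max_send_wr = (sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift
 * and the equivalent RQ computation: subtract the reserved quanta, then
 * shift the remaining quanta down to whole WQEs.
 */
static inline uint32_t irdma_report_max_wr(uint32_t depth, uint8_t shift,
					   uint32_t rsvd)
{
	return (depth - rsvd) >> shift;
}

Because both the kernel-mode and user-mode paths now derive iwqp->max_send_wr and iwqp->max_recv_wr from the computed depth and shift, query QP can return the same values the application saw at create QP.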