author	Luoyouming <luoyouming@huawei.com>	2022-11-08 21:38:46 +0800
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-11-18 20:19:48 -0400
commit	8eaa6f7d569b4a22bfc1b0a3fdfeeb401feb65a4 (patch)
tree	90a63fcd125eee1c15cdbdfa63734dbce030d074 /drivers/infiniband/hw/hns/hns_roce_hw_v2.c
parent	7d984dac8f6bf4ebd3398af82b357e1d181ecaac (diff)
RDMA/hns: Fix ext_sge num error when post send
In the HNS ROCE driver, the sge is divided into standard sge and extended sge. There are 2 standard sge in RC/XRC, and the number of standard sge in UD is 0.

In the RC SQ inline scenario, if the data does not exceed 32 bytes, the standard sge is used. If it exceeds 32 bytes, only the extended sge is used to fill the data.

Currently, when filling the extended sge, max_gs is used directly as the number of extended sge without subtracting the number of standard sge, which is a logical error. The new algorithm subtracts the number of standard sge from max_gs to get the actual number of extended sge.

Fixes: 30b707886aeb ("RDMA/hns: Support inline data in extented sge space for RC")
Link: https://lore.kernel.org/r/20221108133847.2304539-2-xuhaoyue1@hisilicon.com
Signed-off-by: Luoyouming <luoyouming@huawei.com>
Signed-off-by: Haoyue Xu <xuhaoyue1@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
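[Editor's note] Below is a minimal standalone sketch of the corrected capacity check, for illustration only. It assumes HNS_ROCE_SGE_SIZE is 16 bytes and HNS_ROCE_SGE_IN_WQE is 2 (consistent with the 32-byte standard-sge threshold described above); the enum, helper names, and main() are hypothetical and are not the driver code itself.

/* Standalone model of the fixed bound check. Constants are assumed
 * from the commit text: 2 standard sge * 16 bytes = 32 bytes. */
#include <stdbool.h>
#include <stdio.h>

#define HNS_ROCE_SGE_SIZE	16	/* bytes per sge entry (assumed) */
#define HNS_ROCE_SGE_IN_WQE	2	/* standard sge in an RC/XRC WQE */

enum qp_type { QPT_RC, QPT_XRC, QPT_UD, QPT_GSI };

/* UD/GSI QPs carry no standard sge; RC/XRC carry 2. */
static unsigned int std_sge_num(enum qp_type type)
{
	return (type == QPT_UD || type == QPT_GSI) ? 0 : HNS_ROCE_SGE_IN_WQE;
}

/* Old (buggy) bound: max_gs * HNS_ROCE_SGE_SIZE overstates capacity.
 * Fixed bound: (max_gs - std_sge_num) * HNS_ROCE_SGE_SIZE. */
static bool inline_fits_ext_sge(enum qp_type type, unsigned int max_gs,
				unsigned int msg_len)
{
	return msg_len <= (max_gs - std_sge_num(type)) * HNS_ROCE_SGE_SIZE;
}

int main(void)
{
	/* RC QP with max_gs = 4: only 4 - 2 = 2 extended sge, i.e. 32
	 * bytes of inline space. The old check would wrongly have
	 * accepted up to 4 * 16 = 64 bytes. */
	printf("64 bytes fit: %d\n", inline_fits_ext_sge(QPT_RC, 4, 64)); /* 0 */
	printf("32 bytes fit: %d\n", inline_fits_ext_sge(QPT_RC, 4, 32)); /* 1 */
	return 0;
}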
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v2.c')
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_hw_v2.c	13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 1ead35fb031b..dcb59c05edfd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -188,20 +188,29 @@ static void set_atomic_seg(const struct ib_send_wr *wr,
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
}
+static unsigned int get_std_sge_num(struct hns_roce_qp *qp)
+{
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
+ return 0;
+
+ return HNS_ROCE_SGE_IN_WQE;
+}
+
static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
unsigned int *sge_idx, u32 msg_len)
{
struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
- unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE;
unsigned int left_len_in_pg;
unsigned int idx = *sge_idx;
+ unsigned int std_sge_num;
unsigned int i = 0;
unsigned int len;
void *addr;
void *dseg;
- if (msg_len > ext_sge_sz) {
+ std_sge_num = get_std_sge_num(qp);
+ if (msg_len > (qp->sq.max_gs - std_sge_num) * HNS_ROCE_SGE_SIZE) {
ibdev_err(ibdev,
"no enough extended sge space for inline data.\n");
return -EINVAL;