summaryrefslogtreecommitdiff
path: root/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
diff options
context:
space:
mode:
author: Luoyouming <luoyouming@huawei.com> 2022-11-08 21:38:47 +0800
committer: Jason Gunthorpe <jgg@nvidia.com> 2022-11-18 20:19:49 -0400
commit: 0c5e259b06a8efc69f929ad777ea49281bb58e37 (patch)
tree: d97cda0a6b3fdbc1ba4273c17439ccb22c54baf0 /drivers/infiniband/hw/hns/hns_roce_hw_v2.c
parent: 8eaa6f7d569b4a22bfc1b0a3fdfeeb401feb65a4 (diff)
RDMA/hns: Fix incorrect sge nums calculation
The user usually configures the number of SGEs through the max_send_sge parameter when creating a QP, and configures the maximum size of inline data that can be sent through max_inline_data. Inline sends use SGEs to carry the data. The expected behavior is the following: 1) When the SGE space cannot hold the inline data, the SGE space needs to be expanded to accommodate all of the inline data. 2) When the SGE space is large enough to accommodate the inline data, the upper limit of inline data can be raised so that users can send larger inline payloads. Currently, case one is not implemented: when the inline data is larger than the SGE space, an "insufficient sge space" error occurs. This part of the code needs to be reimplemented according to the expected rules. The calculation of the SGE num is changed to take the maximum of max_send_sge and the number of SGEs required for max_inline_data, which solves this problem. Fixes: 05201e01be93 ("RDMA/hns: Refactor process of setting extended sge") Fixes: 30b707886aeb ("RDMA/hns: Support inline data in extented sge space for RC") Link: https://lore.kernel.org/r/20221108133847.2304539-3-xuhaoyue1@hisilicon.com Signed-off-by: Luoyouming <luoyouming@huawei.com> Signed-off-by: Haoyue Xu <xuhaoyue1@hisilicon.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v2.c')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  12
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index dcb59c05edfd..939811867249 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -188,14 +188,6 @@ static void set_atomic_seg(const struct ib_send_wr *wr,
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
}
-static unsigned int get_std_sge_num(struct hns_roce_qp *qp)
-{
- if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
- return 0;
-
- return HNS_ROCE_SGE_IN_WQE;
-}
-
static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
unsigned int *sge_idx, u32 msg_len)
@@ -203,14 +195,12 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
unsigned int left_len_in_pg;
unsigned int idx = *sge_idx;
- unsigned int std_sge_num;
unsigned int i = 0;
unsigned int len;
void *addr;
void *dseg;
- std_sge_num = get_std_sge_num(qp);
- if (msg_len > (qp->sq.max_gs - std_sge_num) * HNS_ROCE_SGE_SIZE) {
+ if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
ibdev_err(ibdev,
"no enough extended sge space for inline data.\n");
return -EINVAL;