author    Chengchang Tang <tangchengchang@huawei.com>    2024-01-13 16:59:34 +0800
committer Leon Romanovsky <leon@kernel.org>    2024-01-25 11:54:38 +0200
commit    2eb999b3d40ff825121784ec38dd5ab0abb7b606 (patch)
tree      c26bb4ba7e7b4c1b4c6a105aaee05e278994f36b /drivers/infiniband/hw/hns/hns_roce_mr.c
parent    0ff6c9779aafc24dd253fdf4e2d6e2dc68940818 (diff)
RDMA/hns: Support adaptive PBL hopnum
In the current implementation, a fixed addressing level (hopnum) is used for the PBL. In fact, the required addressing level depends on the page size and the size of the MR. This patch calculates the addressing level from the page size and the MR size, and uses it to configure the PBL.

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240113085935.2838701-6-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
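For context, here is a minimal standalone sketch of the hop-number selection this patch introduces. The names and constants below are illustrative assumptions that only mirror the driver (HNS_ROCE_MAX_HOP_NUM, BA_BYTE_LEN), not its API. The idea: one BA page of ba_pg_sz bytes holds ba_pg_sz / BA_BYTE_LEN base addresses, so a hop depth h can address (ba_pg_sz / BA_BYTE_LEN)^h buffer pages, and the smallest sufficient depth is chosen.

/*
 * Illustrative sketch only -- the names and constants are assumptions
 * that mirror the driver, not its actual definitions.
 */
#include <stddef.h>

#define SKETCH_MAX_HOP_NUM 3	/* assumed cap, mirrors HNS_ROCE_MAX_HOP_NUM */
#define SKETCH_BA_BYTE_LEN 8	/* bytes per base address entry */

static int sketch_best_hop_num(size_t mr_size, size_t buf_pg_sz, size_t ba_pg_sz)
{
	/* One BA is needed per buffer page of the MR. */
	size_t ba_cnt = (mr_size + buf_pg_sz - 1) / buf_pg_sz;
	size_t unit = 1;	/* BAs addressable at the current hop depth */
	int h;

	for (h = 0; h <= SKETCH_MAX_HOP_NUM; h++) {
		if (ba_cnt <= unit)
			return h;
		unit *= ba_pg_sz / SKETCH_BA_BYTE_LEN;
	}

	return -1;	/* MR too large for the supported hop depth */
}

As a worked example under these assumptions, a 1 GiB MR with 4 KiB buffer and BA pages needs 262144 BAs; one 4 KiB BA page holds 512 entries, so hop 1 covers 512 buffer pages and hop 2 covers 512 * 512 = 262144, giving hop_num = 2.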
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_mr.c')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c  57
1 file changed, 54 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index d00b4aa7214b..9e05b57a2d67 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -112,10 +112,13 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
udata, start);
- if (err)
+ if (err) {
ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
- else
- mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+ return err;
+ }
+
+ mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+ mr->pbl_hop_num = buf_attr.region[0].hopnum;
return err;
}
@@ -943,6 +946,50 @@ static int get_best_page_shift(struct hns_roce_dev *hr_dev,
return 0;
}
+static int get_best_hop_num(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int ba_pg_shift)
+{
+#define INVALID_HOPNUM -1
+#define MIN_BA_CNT 1
+ size_t buf_pg_sz = 1 << buf_attr->page_shift;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ size_t ba_pg_sz = 1 << ba_pg_shift;
+ int hop_num = INVALID_HOPNUM;
+ size_t unit = MIN_BA_CNT;
+ size_t ba_cnt;
+ int j;
+
+ if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
+ return 0;
+
+ /* Calculate the number of buf pages; each buf page needs a BA */
+ if (mtr->umem)
+ ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
+ else
+ ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);
+
+ for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) {
+ if (ba_cnt <= unit) {
+ hop_num = j;
+ break;
+ }
+ /* Number of BAs that can be represented per hop */
+ unit *= ba_pg_sz / BA_BYTE_LEN;
+ }
+
+ if (hop_num < 0) {
+ ibdev_err(ibdev,
+ "failed to calculate a valid hopnum.\n");
+ return -EINVAL;
+ }
+
+ buf_attr->region[0].hopnum = hop_num;
+
+ return 0;
+}
+
static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr)
{
@@ -1116,6 +1163,10 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
ret = get_best_page_shift(hr_dev, mtr, buf_attr);
if (ret)
goto err_init_buf;
+
+ ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift);
+ if (ret)
+ goto err_init_buf;
}
ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr);