Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_restrack.c')
| -rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_restrack.c | 155 |
1 file changed, 79 insertions, 76 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 989a2af2e938..230187dda6a0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -4,13 +4,10 @@
 #include <rdma/rdma_cm.h>
 #include <rdma/restrack.h>
 #include <uapi/rdma/rdma_netlink.h>
-#include "hnae3.h"
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hw_v2.h"
 
-#define MAX_ENTRY_NUM 256
-
 int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
 {
         struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -47,8 +44,6 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
         struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
         struct hns_roce_v2_cq_context context;
-        u32 data[MAX_ENTRY_NUM] = {};
-        int offset = 0;
         int ret;
 
         if (!hr_dev->hw->query_cqc)
@@ -58,23 +53,7 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
         if (ret)
                 return -EINVAL;
 
-        data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
-        data[offset++] = hr_reg_read(&context, CQC_SHIFT);
-        data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
-        data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
-        data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
-        data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
-        data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
-        data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
-        data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
-        data[offset++] = hr_reg_read(&context, CQC_CEQN);
-        data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
-        data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
-        data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
-        data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
-        data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
-
-        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
 
         return ret;
 }
@@ -117,54 +96,41 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
         struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
-        struct hns_roce_v2_qp_context context;
-        u32 data[MAX_ENTRY_NUM] = {};
-        int offset = 0;
+        struct hns_roce_full_qp_ctx {
+                struct hns_roce_v2_qp_context qpc;
+                struct hns_roce_v2_scc_context sccc;
+        } context = {};
+        u32 sccn = hr_qp->qpn;
         int ret;
 
         if (!hr_dev->hw->query_qpc)
                 return -EINVAL;
 
-        ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
+        ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
         if (ret)
-                return -EINVAL;
+                return ret;
+
+        /* If SCC is disabled or the query fails, the queried SCCC will
+         * be all 0.
+         */
+        if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
+            !hr_dev->hw->query_sccc)
+                goto out;
+
+        if (hr_qp->cong_type == CONG_TYPE_DIP) {
+                if (!hr_qp->dip)
+                        goto out;
+                sccn = hr_qp->dip->dip_idx;
+        }
+
+        ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
+        if (ret)
+                ibdev_warn_ratelimited(&hr_dev->ib_dev,
+                                       "failed to query SCCC, ret = %d.\n",
+                                       ret);
 
-        data[offset++] = hr_reg_read(&context, QPC_QP_ST);
-        data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
-        data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
-        data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
-        data[offset++] = hr_reg_read(&context, QPC_SRQN);
-        data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
-        data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
-        data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
-        data[offset++] = hr_reg_read(&context, QPC_RQWS);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
-        data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
-        data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
-        data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
-        data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
-        data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
-        data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
-        data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
-        data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
-
-        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+out:
+        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
 
         return ret;
 }
@@ -204,8 +170,6 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
         struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
         struct hns_roce_v2_mpt_entry context;
-        u32 data[MAX_ENTRY_NUM] = {};
-        int offset = 0;
         int ret;
 
         if (!hr_dev->hw->query_mpt)
@@ -215,17 +179,56 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
         if (ret)
                 return -EINVAL;
 
-        data[offset++] = hr_reg_read(&context, MPT_ST);
-        data[offset++] = hr_reg_read(&context, MPT_PD);
-        data[offset++] = hr_reg_read(&context, MPT_LKEY);
-        data[offset++] = hr_reg_read(&context, MPT_LEN_L);
-        data[offset++] = hr_reg_read(&context, MPT_LEN_H);
-        data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
-        data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
-        data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
-        data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);
-
-        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
+
+        return ret;
+}
+
+int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+        struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
+        struct nlattr *table_attr;
+
+        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+        if (!table_attr)
+                return -EMSGSIZE;
+
+        if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
+                goto err;
+
+        if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
+                goto err;
+
+        if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
+                goto err;
+
+        if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
+                goto err;
+
+        nla_nest_end(msg, table_attr);
+
+        return 0;
+
+err:
+        nla_nest_cancel(msg, table_attr);
+        return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+        struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
+        struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
+        struct hns_roce_srq_context context;
+        int ret;
+
+        if (!hr_dev->hw->query_srqc)
+                return -EINVAL;
+
+        ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
+        if (ret)
+                return ret;
+
+        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
 
         return ret;
 }
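
Note: every converted *_entry_raw helper above now follows the same pattern: query the complete hardware context and export it verbatim as one RDMA_NLDEV_ATTR_RES_RAW netlink attribute, instead of copying selected register fields into a bounded u32 array. The sketch below is an illustration, not part of this patch, of how such callbacks are typically hooked into the RDMA core's restrack machinery through struct ib_device_ops; the real registration for hns lives in hns_roce_main.c, so treat the exact member list here as an assumption.

/* Illustrative sketch only: wiring the restrack fill callbacks into
 * ib_device_ops. The actual hns registration table is defined in
 * hns_roce_main.c and is not shown in this diff.
 */
#include <rdma/ib_verbs.h>
#include "hns_roce_device.h"

static const struct ib_device_ops hns_roce_dev_restrack_ops = {
        .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
        .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
        .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
        .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
        .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
        .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
        .fill_res_srq_entry = hns_roce_fill_res_srq_entry,
        .fill_res_srq_entry_raw = hns_roce_fill_res_srq_entry_raw,
};

With hooks like these in place, the raw blobs reach userspace through the nldev interface as opaque RDMA_NLDEV_ATTR_RES_RAW data; consumers such as the iproute2 rdma tool print them as hex and leave decoding of the context layout to the reader.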
