Diffstat (limited to 'drivers/infiniband/hw/hns')
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_device.h | 21
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_hem.c | 18
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 134
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 16
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_main.c | 32
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_mr.c | 120
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_qp.c | 4
7 files changed, 93 insertions, 252 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 1dcc9cbb4678..78ee04a48a74 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -316,16 +316,6 @@ struct hns_roce_mtr {
         struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
 };
 
-struct hns_roce_mw {
-        struct ib_mw ibmw;
-        u32 pdn;
-        u32 rkey;
-        int enabled; /* MW's active status */
-        u32 pbl_hop_num;
-        u32 pbl_ba_pg_sz;
-        u32 pbl_buf_pg_sz;
-};
-
 struct hns_roce_mr {
         struct ib_mr ibmr;
         u64 iova; /* MR's virtual original addr */
@@ -856,6 +846,7 @@ struct hns_roce_caps {
         u16 default_ceq_arm_st;
         u8 cong_cap;
         enum hns_roce_cong_type default_cong_type;
+        u32 max_ack_req_msg_len;
 };
 
 enum hns_roce_device_state {
@@ -933,7 +924,6 @@ struct hns_roce_hw {
                         struct hns_roce_mr *mr, int flags,
                         void *mb_buf);
         int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
-        int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
         void (*write_cqc)(struct hns_roce_dev *hr_dev,
                           struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
                           dma_addr_t dma_handle);
@@ -1078,11 +1068,6 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
         return container_of(ibmr, struct hns_roce_mr, ibmr);
 }
 
-static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
-{
-        return container_of(ibmw, struct hns_roce_mw, ibmw);
-}
-
 static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
 {
         return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -1234,6 +1219,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                    u64 virt_addr, int access_flags,
+                                   struct ib_dmah *dmah,
                                    struct ib_udata *udata);
 struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
                                      u64 length, u64 virt_addr,
@@ -1246,9 +1232,6 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
 unsigned long key_to_hw_index(u32 key);
 
-int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
-int hns_roce_dealloc_mw(struct ib_mw *ibmw);
-
 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
 struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
                                         u32 page_shift, u32 flags);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index ca0798224e56..3d479c63b117 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -249,15 +249,12 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 }
 
 static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
-                                               unsigned long hem_alloc_size,
-                                               gfp_t gfp_mask)
+                                               unsigned long hem_alloc_size)
 {
         struct hns_roce_hem *hem;
         int order;
         void *buf;
 
-        WARN_ON(gfp_mask & __GFP_HIGHMEM);
-
         order = get_order(hem_alloc_size);
         if (PAGE_SIZE << order != hem_alloc_size) {
                 dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
@@ -265,13 +262,12 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
                 return NULL;
         }
 
-        hem = kmalloc(sizeof(*hem),
-                      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+        hem = kmalloc(sizeof(*hem), GFP_KERNEL);
         if (!hem)
                 return NULL;
 
         buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
-                                 &hem->dma, gfp_mask);
+                                 &hem->dma, GFP_KERNEL);
         if (!buf)
                 goto fail;
 
@@ -378,7 +374,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
 {
         u32 bt_size = mhop->bt_chunk_size;
         struct device *dev = hr_dev->dev;
-        gfp_t flag;
         u64 bt_ba;
         u32 size;
         int ret;
@@ -417,8 +412,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
          * alloc bt space chunk for MTT/CQE.
          */
         size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
-        flag = GFP_KERNEL | __GFP_NOWARN;
-        table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
+        table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
         if (!table->hem[index->buf]) {
                 ret = -ENOMEM;
                 goto err_alloc_hem;
@@ -546,9 +540,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
                 goto out;
         }
 
-        table->hem[i] = hns_roce_alloc_hem(hr_dev,
-                                           table->table_chunk_size,
-                                           GFP_KERNEL | __GFP_NOWARN);
+        table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
         if (!table->hem[i]) {
                 ret = -ENOMEM;
                 goto out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index fa8747656f25..64bca08f3f1a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -144,7 +144,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
         u64 pbl_ba;
 
         /* use ib_access_flags */
-        hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
+        hr_reg_write_bool(fseg, FRMR_BIND_EN, 0);
         hr_reg_write_bool(fseg, FRMR_ATOMIC,
                           wr->access & IB_ACCESS_REMOTE_ATOMIC);
         hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
@@ -2196,31 +2196,36 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 
 static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
 {
-        struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
+        struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
         struct hns_roce_caps *caps = &hr_dev->caps;
         struct hns_roce_query_pf_caps_a *resp_a;
         struct hns_roce_query_pf_caps_b *resp_b;
         struct hns_roce_query_pf_caps_c *resp_c;
         struct hns_roce_query_pf_caps_d *resp_d;
         struct hns_roce_query_pf_caps_e *resp_e;
+        struct hns_roce_query_pf_caps_f *resp_f;
         enum hns_roce_opcode_type cmd;
         int ctx_hop_num;
         int pbl_hop_num;
+        int cmd_num;
         int ret;
         int i;
 
         cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
               HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
+        cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+                  HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
+                  HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
 
-        for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
+        for (i = 0; i < cmd_num - 1; i++) {
                 hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
-                if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
-                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-                else
-                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+                desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
         }
 
-        ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
+        hns_roce_cmq_setup_basic_desc(&desc[cmd_num - 1], cmd, true);
+        desc[cmd_num - 1].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+        ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
         if (ret)
                 return ret;
 
@@ -2229,6 +2234,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
         resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
         resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
         resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
+        resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
 
         caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
         caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
@@ -2293,6 +2299,8 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
         caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
         caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
 
+        caps->max_ack_req_msg_len = le32_to_cpu(resp_f->max_ack_req_msg_len);
+
         caps->qpc_hop_num = ctx_hop_num;
         caps->sccc_hop_num = ctx_hop_num;
         caps->srqc_hop_num = ctx_hop_num;
@@ -2627,7 +2635,7 @@ static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
         struct ib_pd *pd;
 
         hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
-        if (ZERO_OR_NULL_PTR(hr_pd))
+        if (!hr_pd)
                 return NULL;
         pd = &hr_pd->ibpd;
         pd->device = ibdev;
@@ -2658,7 +2666,7 @@ static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
         cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
 
         hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
-        if (ZERO_OR_NULL_PTR(hr_cq))
+        if (!hr_cq)
                 return NULL;
 
         cq = &hr_cq->ib_cq;
@@ -2691,7 +2699,7 @@ static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
         int ret;
 
         hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
-        if (ZERO_OR_NULL_PTR(hr_qp))
+        if (!hr_qp)
                 return -ENOMEM;
 
         qp = &hr_qp->ibqp;
@@ -2986,14 +2994,22 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 {
         int ret;
 
+        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+                ret = free_mr_init(hr_dev);
+                if (ret) {
+                        dev_err(hr_dev->dev, "failed to init free mr!\n");
+                        return ret;
+                }
+        }
+
         /* The hns ROCEE requires the extdb info to be cleared before using */
         ret = hns_roce_clear_extdb_list_info(hr_dev);
         if (ret)
-                return ret;
+                goto err_clear_extdb_failed;
 
         ret = get_hem_table(hr_dev);
         if (ret)
-                return ret;
+                goto err_get_hem_table_failed;
 
         if (hr_dev->is_vf)
                 return 0;
@@ -3008,6 +3024,11 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 
 err_llm_init_failed:
         put_hem_table(hr_dev);
+err_get_hem_table_failed:
+        hns_roce_function_clear(hr_dev);
+err_clear_extdb_failed:
+        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+                free_mr_exit(hr_dev);
 
         return ret;
 }
@@ -3313,8 +3334,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
         hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
         hr_reg_write(mpt_entry, MPT_PD, mr->pd);
 
-        hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
-                          mr->access & IB_ACCESS_MW_BIND);
         hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
                           mr->access & IB_ACCESS_REMOTE_ATOMIC);
         hr_reg_write_bool(mpt_entry, MPT_RR_EN,
@@ -3358,8 +3377,6 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
         hr_reg_write(mpt_entry, MPT_PD, mr->pd);
 
         if (flags & IB_MR_REREG_ACCESS) {
-                hr_reg_write(mpt_entry, MPT_BIND_EN,
-                             (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
                 hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
                              mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
                 hr_reg_write(mpt_entry, MPT_RR_EN,
@@ -3397,7 +3414,6 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
         hr_reg_enable(mpt_entry, MPT_R_INV_EN);
 
         hr_reg_enable(mpt_entry, MPT_FRE);
-        hr_reg_clear(mpt_entry, MPT_MR_MW);
         hr_reg_enable(mpt_entry, MPT_BPD);
         hr_reg_clear(mpt_entry, MPT_PA);
 
@@ -3417,38 +3433,6 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
         return 0;
 }
 
-static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
-{
-        struct hns_roce_v2_mpt_entry *mpt_entry;
-
-        mpt_entry = mb_buf;
-        memset(mpt_entry, 0, sizeof(*mpt_entry));
-
-        hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
-        hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
-
-        hr_reg_enable(mpt_entry, MPT_R_INV_EN);
-        hr_reg_enable(mpt_entry, MPT_LW_EN);
-
-        hr_reg_enable(mpt_entry, MPT_MR_MW);
-        hr_reg_enable(mpt_entry, MPT_BPD);
-        hr_reg_clear(mpt_entry, MPT_PA);
-        hr_reg_write(mpt_entry, MPT_BQP,
-                     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
-
-        mpt_entry->lkey = cpu_to_le32(mw->rkey);
-
-        hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM,
-                     mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
-                     mw->pbl_hop_num);
-        hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
-                     mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
-        hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
-                     mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-
-        return 0;
-}
-
 static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
@@ -3849,7 +3833,6 @@ static const u32 wc_send_op_map[] = {
         HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP,     MASKED_COMP_SWAP),
         HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD,    MASKED_FETCH_ADD),
         HR_WC_OP_MAP(FAST_REG_PMR,              REG_MR),
-        HR_WC_OP_MAP(BIND_MW,                   REG_MR),
 };
 
 static int to_ib_wc_send_op(u32 hr_opcode)
@@ -4560,7 +4543,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
         dma_addr_t trrl_ba;
         dma_addr_t irrl_ba;
         enum ib_mtu ib_mtu;
+        u8 ack_req_freq;
         const u8 *smac;
+        int lp_msg_len;
         u8 lp_pktn_ini;
         u64 *mtts;
         u8 *dmac;
@@ -4643,7 +4628,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                 return -EINVAL;
 #define MIN_LP_MSG_LEN 1024
         /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
-        lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
+        lp_msg_len = max(mtu, MIN_LP_MSG_LEN);
+        lp_pktn_ini = ilog2(lp_msg_len / mtu);
 
         if (attr_mask & IB_QP_PATH_MTU) {
                 hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -4653,8 +4639,22 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
         hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
         hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
 
-        /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
-        hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
+        /*
+         * There are several constraints for ACK_REQ_FREQ:
+         * 1. mtu * (2 ^ ACK_REQ_FREQ) should not be too large, otherwise
+         *    it may cause some unexpected retries when sending large
+         *    payload.
+         * 2. ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI.
+         * 3. ACK_REQ_FREQ must be equal to LP_PKTN_INI when using LDCP
+         *    or HC3 congestion control algorithm.
+         */
+        if (hr_qp->cong_type == CONG_TYPE_LDCP ||
+            hr_qp->cong_type == CONG_TYPE_HC3 ||
+            hr_dev->caps.max_ack_req_msg_len < lp_msg_len)
+                ack_req_freq = lp_pktn_ini;
+        else
+                ack_req_freq = ilog2(hr_dev->caps.max_ack_req_msg_len / mtu);
+        hr_reg_write(context, QPC_ACK_REQ_FREQ, ack_req_freq);
         hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
 
         hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
@@ -5349,11 +5349,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-        struct hns_roce_v2_qp_context ctx[2];
-        struct hns_roce_v2_qp_context *context = ctx;
-        struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
+        struct hns_roce_v2_qp_context *context;
+        struct hns_roce_v2_qp_context *qpc_mask;
         struct ib_device *ibdev = &hr_dev->ib_dev;
-        int ret;
+        int ret = -ENOMEM;
 
         if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
                 return -EOPNOTSUPP;
@@ -5364,7 +5363,11 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
          * we should set all bits of the relevant fields in context mask to
          * 0 at the same time, else set them to 0x1.
          */
-        memset(context, 0, hr_dev->caps.qpc_sz);
+        context = kvzalloc(sizeof(*context), GFP_KERNEL);
+        qpc_mask = kvzalloc(sizeof(*qpc_mask), GFP_KERNEL);
+        if (!context || !qpc_mask)
+                goto out;
+
         memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
 
         ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
@@ -5406,6 +5409,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                 clear_qp(hr_qp);
 
 out:
+        kvfree(qpc_mask);
+        kvfree(context);
         return ret;
 }
 
@@ -6948,7 +6953,6 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
         .write_mtpt = hns_roce_v2_write_mtpt,
         .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
         .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
-        .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
         .write_cqc = hns_roce_v2_write_cqc,
         .set_hem = hns_roce_v2_set_hem,
         .clear_hem = hns_roce_v2_clear_hem,
@@ -7044,21 +7048,11 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
                 goto error_failed_roce_init;
         }
 
-        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
-                ret = free_mr_init(hr_dev);
-                if (ret) {
-                        dev_err(hr_dev->dev, "failed to init free mr!\n");
-                        goto error_failed_free_mr_init;
-                }
-        }
-
         handle->priv = hr_dev;
 
         return 0;
 
-error_failed_free_mr_init:
-        hns_roce_exit(hr_dev);
-
 error_failed_roce_init:
         kfree(hr_dev->priv);
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index bc7466830eaf..e64a04d6f85b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -814,24 +814,16 @@ struct hns_roce_v2_mpt_entry {
 
 #define V2_MPT_BYTE_8_LW_EN_S 7
 
-#define V2_MPT_BYTE_8_MW_CNT_S 8
-#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8)
-
 #define V2_MPT_BYTE_12_FRE_S 0
 
 #define V2_MPT_BYTE_12_PA_S 1
 
-#define V2_MPT_BYTE_12_MR_MW_S 4
-
 #define V2_MPT_BYTE_12_BPD_S 5
 
 #define V2_MPT_BYTE_12_BQP_S 6
 
 #define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
 
-#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
-#define V2_MPT_BYTE_12_MW_BIND_QPN_M GENMASK(31, 8)
-
 #define V2_MPT_BYTE_48_PBL_BA_H_S 0
 #define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
 
@@ -1168,7 +1160,8 @@ struct hns_roce_cfg_gmv_tb_b {
 #define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32)
 #define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64)
 
-#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 6
 struct hns_roce_query_pf_caps_a {
         u8 number_ports;
         u8 local_ca_ack_delay;
@@ -1280,6 +1273,11 @@ struct hns_roce_query_pf_caps_e {
         __le16 aeq_period;
 };
 
+struct hns_roce_query_pf_caps_f {
+        __le32 max_ack_req_msg_len;
+        __le32 rsv[5];
+};
+
 #define PF_CAPS_E_FIELD_LOC(h, l) \
         FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l)
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index e7a497cc125c..d50f36f8a110 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -672,13 +672,6 @@ static const struct ib_device_ops hns_roce_dev_mr_ops = {
         .rereg_user_mr = hns_roce_rereg_user_mr,
 };
 
-static const struct ib_device_ops hns_roce_dev_mw_ops = {
-        .alloc_mw = hns_roce_alloc_mw,
-        .dealloc_mw = hns_roce_dealloc_mw,
-
-        INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
-};
-
 static const struct ib_device_ops hns_roce_dev_frmr_ops = {
         .alloc_mr = hns_roce_alloc_mr,
         .map_mr_sg = hns_roce_map_mr_sg,
@@ -732,9 +725,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
                 ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
-                ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
-
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
                 ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
 
@@ -947,10 +937,7 @@ err_unmap_dmpt:
 static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
 {
         hns_roce_cleanup_bitmap(hr_dev);
-
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
-            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
-                mutex_destroy(&hr_dev->pgdir_mutex);
+        mutex_destroy(&hr_dev->pgdir_mutex);
 }
 
 /**
@@ -965,11 +952,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 
         spin_lock_init(&hr_dev->sm_lock);
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
-            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
-                INIT_LIST_HEAD(&hr_dev->pgdir_list);
-                mutex_init(&hr_dev->pgdir_mutex);
-        }
+        INIT_LIST_HEAD(&hr_dev->qp_list);
+        spin_lock_init(&hr_dev->qp_list_lock);
+
+        INIT_LIST_HEAD(&hr_dev->pgdir_list);
+        mutex_init(&hr_dev->pgdir_mutex);
 
         hns_roce_init_uar_table(hr_dev);
 
@@ -1001,9 +988,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 
 err_uar_table_free:
         ida_destroy(&hr_dev->uar_ida.ida);
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
-            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
-                mutex_destroy(&hr_dev->pgdir_mutex);
+        mutex_destroy(&hr_dev->pgdir_mutex);
 
         return ret;
 }
@@ -1132,9 +1117,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
                 }
         }
 
-        INIT_LIST_HEAD(&hr_dev->qp_list);
-        spin_lock_init(&hr_dev->qp_list_lock);
-
         ret = hns_roce_register_device(hr_dev);
         if (ret)
                 goto error_failed_register_device;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 93a48b41955b..0f037e545520 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -231,12 +231,18 @@ err_free:
 
 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                    u64 virt_addr, int access_flags,
+                                   struct ib_dmah *dmah,
                                    struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
         struct hns_roce_mr *mr;
         int ret;
 
+        if (dmah) {
+                ret = -EOPNOTSUPP;
+                goto err_out;
+        }
+
         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
         if (!mr) {
                 ret = -ENOMEM;
@@ -483,120 +489,6 @@ err_page_list:
         return sg_num;
 }
 
-static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
-                             struct hns_roce_mw *mw)
-{
-        struct device *dev = hr_dev->dev;
-        int ret;
-
-        if (mw->enabled) {
-                ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
-                                              key_to_hw_index(mw->rkey) &
-                                              (hr_dev->caps.num_mtpts - 1));
-                if (ret)
-                        dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
-
-                hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
-                                   key_to_hw_index(mw->rkey));
-        }
-
-        ida_free(&hr_dev->mr_table.mtpt_ida.ida,
-                 (int)key_to_hw_index(mw->rkey));
-}
-
-static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
-                              struct hns_roce_mw *mw)
-{
-        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-        struct hns_roce_cmd_mailbox *mailbox;
-        struct device *dev = hr_dev->dev;
-        unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
-        int ret;
-
-        /* prepare HEM entry memory */
-        ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
-        if (ret)
-                return ret;
-
-        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
-        if (IS_ERR(mailbox)) {
-                ret = PTR_ERR(mailbox);
-                goto err_table;
-        }
-
-        ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
-        if (ret) {
-                dev_err(dev, "MW write mtpt fail!\n");
-                goto err_page;
-        }
-
-        ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
-                                     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
-        if (ret) {
-                dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
-                goto err_page;
-        }
-
-        mw->enabled = 1;
-
-        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
-        return 0;
-
-err_page:
-        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
-err_table:
-        hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
-
-        return ret;
-}
-
-int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
-{
-        struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
-        struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
-        struct ib_device *ibdev = &hr_dev->ib_dev;
-        struct hns_roce_mw *mw = to_hr_mw(ibmw);
-        int ret;
-        int id;
-
-        /* Allocate a key for mw from mr_table */
-        id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
-                             GFP_KERNEL);
-        if (id < 0) {
-                ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
-                return -ENOMEM;
-        }
-
-        mw->rkey = hw_index_to_key(id);
-
-        ibmw->rkey = mw->rkey;
-        mw->pdn = to_hr_pd(ibmw->pd)->pdn;
-        mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
-        mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
-        mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
-
-        ret = hns_roce_mw_enable(hr_dev, mw);
-        if (ret)
-                goto err_mw;
-
-        return 0;
-
-err_mw:
-        hns_roce_mw_free(hr_dev, mw);
-        return ret;
-}
-
-int hns_roce_dealloc_mw(struct ib_mw *ibmw)
-{
-        struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
-        struct hns_roce_mw *mw = to_hr_mw(ibmw);
-
-        hns_roce_mw_free(hr_dev, mw);
-        return 0;
-}
-
 static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                           struct hns_roce_buf_region *region, dma_addr_t *pages,
                           int max_count)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9f376a2232b0..6ff1b8ce580c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1003,14 +1003,14 @@ static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
         int ret;
 
         sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
-        if (ZERO_OR_NULL_PTR(sq_wrid)) {
+        if (!sq_wrid) {
                 ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
                 return -ENOMEM;
         }
 
         if (hr_qp->rq.wqe_cnt) {
                 rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
-                if (ZERO_OR_NULL_PTR(rq_wrid)) {
+                if (!rq_wrid) {
                         ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
                         ret = -ENOMEM;
                         goto err_sq;
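
Note on the hns_roce_query_caps() hunk above: the loop now sets HNS_ROCE_CMD_FLAG_NEXT on every descriptor except the last, so the firmware consumes the cmd_num descriptors (5 on HIP08, 6 otherwise) as one chained query. Below is a minimal standalone C sketch of that chaining pattern; struct cmq_desc, CMD_FLAG_NEXT, and setup_query_chain() are simplified stand-ins, not the driver's real definitions.

#include <stdint.h>
#include <string.h>

#define CMD_FLAG_NEXT 0x0004            /* illustrative flag value only */

struct cmq_desc {                       /* stand-in for hns_roce_cmq_desc */
        uint16_t opcode;
        uint16_t flag;
        uint32_t data[6];
};

/* Chain cmd_num descriptors: NEXT is set on all but the last one. */
static void setup_query_chain(struct cmq_desc *desc, int cmd_num,
                              uint16_t opcode)
{
        int i;

        for (i = 0; i < cmd_num - 1; i++) {
                memset(&desc[i], 0, sizeof(desc[i]));
                desc[i].opcode = opcode;
                desc[i].flag |= CMD_FLAG_NEXT;
        }

        /* the final descriptor terminates the chain */
        memset(&desc[cmd_num - 1], 0, sizeof(desc[cmd_num - 1]));
        desc[cmd_num - 1].opcode = opcode;
        desc[cmd_num - 1].flag &= ~CMD_FLAG_NEXT;
}

int main(void)
{
        struct cmq_desc desc[6];

        /* HIP09 and later query six descriptors; HIP08 would pass 5 */
        setup_query_chain(desc, 6, 0x8000);
        return desc[5].flag == 0 ? 0 : 1;
}

Handling the last descriptor outside the loop is what lets a single loop body serve both descriptor counts, which is why the patch drops the old if/else inside the loop.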
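
Similarly, for the new ACK_REQ_FREQ selection in modify_qp_init_to_rtr(), here is a minimal userspace sketch of the arithmetic, assuming mtu is the path MTU in bytes; pick_ack_req_freq(), ilog2_u32(), and the is_ldcp_or_hc3 flag are hypothetical stand-ins for the driver's hr_qp->cong_type checks and the kernel's ilog2().

#include <stdint.h>
#include <stdio.h>

#define MIN_LP_MSG_LEN 1024

/* plain-C stand-in for the kernel's ilog2(): floor(log2(v)) */
static unsigned int ilog2_u32(uint32_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

/*
 * LDCP/HC3 pin ACK_REQ_FREQ to LP_PKTN_INI; otherwise it is derived
 * from the firmware-reported max_ack_req_msg_len, which must cover
 * at least lp_msg_len to be usable.
 */
static uint8_t pick_ack_req_freq(uint32_t mtu, uint32_t max_ack_req_msg_len,
                                 int is_ldcp_or_hc3)
{
        uint32_t lp_msg_len = mtu > MIN_LP_MSG_LEN ? mtu : MIN_LP_MSG_LEN;
        uint8_t lp_pktn_ini = ilog2_u32(lp_msg_len / mtu);

        if (is_ldcp_or_hc3 || max_ack_req_msg_len < lp_msg_len)
                return lp_pktn_ini;
        return ilog2_u32(max_ack_req_msg_len / mtu);
}

int main(void)
{
        /* mtu 256, firmware allows 64 KB between ACK requests */
        printf("%u\n", pick_ack_req_freq(256, 65536, 0));  /* prints 8 */
        /* same settings under LDCP/HC3: pinned to lp_pktn_ini = 2 */
        printf("%u\n", pick_ack_req_freq(256, 65536, 1));  /* prints 2 */
        return 0;
}

With mtu = 256 and max_ack_req_msg_len = 64 KB, an ACK is requested every 2^8 packets instead of every 2^2, satisfying constraint 1 while still meeting constraint 2 (8 >= 2).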