Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/qplib_sp.c')
| -rw-r--r-- | drivers/infiniband/hw/bnxt_re/qplib_sp.c | 251 |
1 file changed, 189 insertions, 62 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7e20ae3d2c4f..408a34df2667 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -66,14 +66,15 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
 	return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
 }
 
-static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
-				     char *fw_ver)
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw)
 {
 	struct creq_query_version_resp resp = {};
 	struct bnxt_qplib_cmdqmsg msg = {};
 	struct cmdq_query_version req = {};
+	struct bnxt_qplib_dev_attr *attr;
 	int rc;
 
+	attr = rcfw->res->dattr;
 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 				 CMDQ_BASE_OPCODE_QUERY_VERSION,
 				 sizeof(req));
@@ -82,15 +83,15 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return;
-	fw_ver[0] = resp.fw_maj;
-	fw_ver[1] = resp.fw_minor;
-	fw_ver[2] = resp.fw_bld;
-	fw_ver[3] = resp.fw_rsvd;
+	attr->fw_ver[0] = resp.fw_maj;
+	attr->fw_ver[1] = resp.fw_minor;
+	attr->fw_ver[2] = resp.fw_bld;
+	attr->fw_ver[3] = resp.fw_rsvd;
 }
 
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
-			    struct bnxt_qplib_dev_attr *attr)
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
 {
+	struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
 	struct creq_query_func_resp resp = {};
 	struct bnxt_qplib_cmdqmsg msg = {};
 	struct creq_query_func_resp_sb *sb;
@@ -129,12 +130,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_qp_init_rd_atom =
 		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
-	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
-	/*
-	 * 128 WQEs needs to be reserved for the HW (8916). Prevent
-	 * reporting the max number
-	 */
-	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
+		/*
+		 * 128 WQEs needs to be reserved for the HW (8916). Prevent
+		 * reporting the max number on legacy devices
+		 */
+		attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+	}
+
+	/* Adjust for max_qp_wqes for variable wqe */
+	if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+		attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
 	attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
 			    min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
@@ -155,7 +162,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
 	attr->max_srq_sges = sb->max_srq_sge;
 	attr->max_pkey = 1;
-	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
+	attr->max_inline_data = attr->max_qp_sges * sizeof(struct sq_sge);
 	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
 		attr->l2_db_size = (sb->l2_db_space_size + 1) *
 				    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
@@ -170,7 +177,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
 	attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
 
-	bnxt_qplib_query_version(rcfw, attr->fw_ver);
+	if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+		attr->max_srq += le16_to_cpu(sb->max_srq_ext);
 
 	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
 		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
@@ -300,7 +308,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 			struct bnxt_qplib_gid *gid, const u8 *smac,
-			u16 vlan_id, bool update, u32 *index)
+			u16 vlan_id, bool update, u32 *index,
+			bool is_ugid, u32 stats_ctx_id)
 {
 	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
 						   struct bnxt_qplib_res,
@@ -365,6 +374,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
 	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
 
+	req.stats_ctx = cpu_to_le16(CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID |
+				    (u16)stats_ctx_id);
+
 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
 				sizeof(resp), 0);
 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -388,46 +400,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	return 0;
 }
 
-int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-			   struct bnxt_qplib_gid *gid, u16 gid_idx,
-			   const u8 *smac)
-{
-	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
-						   struct bnxt_qplib_res,
-						   sgid_tbl);
-	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct creq_modify_gid_resp resp = {};
-	struct bnxt_qplib_cmdqmsg msg = {};
-	struct cmdq_modify_gid req = {};
-	int rc;
-
-	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
-				 CMDQ_BASE_OPCODE_MODIFY_GID,
-				 sizeof(req));
-
-	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
-	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
-	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
-	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
-	if (res->prio) {
-		req.vlan |= cpu_to_le16
-			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
-			 CMDQ_ADD_GID_VLAN_VLAN_EN);
-	}
-
-	/* MAC in network format */
-	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
-	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
-	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
-
-	req.gid_index = cpu_to_le16(gid_idx);
-
-	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
-				sizeof(resp), 0);
-	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
-	return rc;
-}
-
 /* AH */
 int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 			 bool block)
@@ -606,7 +578,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
 }
 
 int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
-		      struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
+		      struct ib_umem *umem, int num_pbls, u32 buf_pg_size, bool unified_mr)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct bnxt_qplib_hwq_attr hwq_attr = {};
@@ -665,10 +637,10 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 	req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
 				CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
 				CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
-	req.access = (mr->access_flags & 0xFFFF);
+	req.access = (mr->access_flags & BNXT_QPLIB_MR_ACCESS_MASK);
 	req.va = cpu_to_le64(mr->va);
 	req.key = cpu_to_le32(mr->lkey);
-	if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
+	if (unified_mr)
 		req.key = cpu_to_le32(mr->pd->id);
 	req.flags = cpu_to_le16(mr->flags);
 	req.mr_size = cpu_to_le64(mr->total_size);
@@ -679,7 +651,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 	if (rc)
 		goto fail;
 
-	if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
+	if (unified_mr) {
 		mr->lkey = le32_to_cpu(resp.xid);
 		mr->rkey = mr->lkey;
 	}
@@ -837,7 +809,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
 	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
 	req.resp_addr = cpu_to_le64(sbuf.dma_addr);
-	req.function_id = cpu_to_le32(fid);
+	if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+		req.function_id =
+			cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+				    (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+	else
+		req.function_id = cpu_to_le32(fid);
 	req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
 
 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
@@ -1016,3 +993,153 @@ free_mem:
 	dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
 	return rc;
 }
+
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+				    struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+	cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+	cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+	cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+	cc_ext->tr_update_mode = sb->tr_update_mode;
+	cc_ext->tr_update_cyls = sb->tr_update_cycles;
+	cc_ext->fr_rtt = sb->fr_num_rtts;
+	cc_ext->ai_rate_incr = sb->ai_rate_increase;
+	cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+	cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+	cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+	cc_ext->bw_avg_weight = sb->bw_avg_weight;
+	cc_ext->cr_factor = sb->actual_cr_factor;
+	cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+	cc_ext->cp_bias_en = sb->cp_bias_en;
+	cc_ext->cp_bias = sb->cp_bias;
+	cc_ext->cnp_ecn = sb->cnp_ecn;
+	cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+	cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+	cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+	cc_ext->cr_width = sb->cr_width;
+	cc_ext->min_quota = sb->quota_period_min;
+	cc_ext->max_quota = sb->quota_period_max;
+	cc_ext->abs_max_quota = sb->quota_period_abs_max;
+	cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+	cc_ext->cr_prob_fac = sb->cr_prob_factor;
+	cc_ext->tr_prob_fac = sb->tr_prob_factor;
+	cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+	cc_ext->red_div = sb->red_div;
+	cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+	cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+	cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+	cc_ext->low_rate_en = sb->use_rate_table;
+	cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+	cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+	cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+	cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+	cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+	cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+	cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+	cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+	cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+			      struct bnxt_qplib_cc_param *cc_param)
+{
+	struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+	struct creq_query_roce_cc_resp resp = {};
+	struct creq_query_roce_cc_resp_sb *sb;
+	struct bnxt_qplib_cmdqmsg msg = {};
+	struct cmdq_query_roce_cc req = {};
+	struct bnxt_qplib_rcfw_sbuf sbuf;
+	size_t resp_size;
+	int rc;
+
+	/* Query the parameters from chip */
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+				 sizeof(req));
+	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
+		resp_size = sizeof(*ext_sb);
+	else
+		resp_size = sizeof(*sb);
+
+	sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+				     &sbuf.dma_addr, GFP_KERNEL);
+	if (!sbuf.sb)
+		return -ENOMEM;
+
+	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+				sizeof(resp), 0);
+	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+	if (rc)
+		goto out;
+
+	ext_sb = sbuf.sb;
+	sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+		(struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+	cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+	cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+			     CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+			     CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+	cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+			      CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+			      CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+	cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+	cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+	cc_param->g = sb->g;
+	cc_param->nph_per_state = sb->num_phases_per_state;
+	cc_param->init_cr = le16_to_cpu(sb->init_cr);
+	cc_param->init_tr = le16_to_cpu(sb->init_tr);
+	cc_param->cc_mode = sb->cc_mode;
+	cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+	cc_param->rtt = le16_to_cpu(sb->rtt);
+	cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+	cc_param->time_pph = sb->time_per_phase;
+	cc_param->pkts_pph = sb->pkts_per_phase;
+	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+		bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
+		cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
+	}
+out:
+	dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+	return rc;
+}
+
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+	struct creq_roce_mirror_cfg_resp resp = {};
+	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+	struct cmdq_roce_mirror_cfg req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+				 sizeof(req));
+
+	req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+	struct creq_roce_mirror_cfg_resp resp = {};
+	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+	struct cmdq_roce_mirror_cfg req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+				 sizeof(req));
+
+	req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+
+	return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
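For context, a minimal caller-side sketch of the refactored query path follows. It assumes only what the diff itself exposes: bnxt_qplib_get_dev_attr() now takes just the rcfw pointer and fills rcfw->res->dattr, and bnxt_qplib_query_version() is no longer static and writes the firmware version into attr->fw_ver. The helper name bnxt_re_query_hwcaps() is hypothetical and not part of the patch; error handling is abbreviated.

/* Hypothetical probe-path helper -- illustrative only, not in this patch. */
static int bnxt_re_query_hwcaps(struct bnxt_qplib_rcfw *rcfw)
{
	/* Device attributes now live behind res->dattr. */
	struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
	int rc;

	/* Fills rcfw->res->dattr directly; no attr argument any more. */
	rc = bnxt_qplib_get_dev_attr(rcfw);
	if (rc)
		return rc;

	/* Firmware version is queried separately into attr->fw_ver. */
	bnxt_qplib_query_version(rcfw);

	pr_debug("bnxt_re: max_qp_wqes=%d fw %d.%d.%d.%d\n",
		 attr->max_qp_wqes, attr->fw_ver[0], attr->fw_ver[1],
		 attr->fw_ver[2], attr->fw_ver[3]);
	return 0;
}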
