Diffstat (limited to 'drivers/infiniband/hw/irdma/utils.c')
-rw-r--r--	drivers/infiniband/hw/irdma/utils.c	509
1 file changed, 182 insertions(+), 327 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 445e69e86409..cc2a12f735d3 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2015 - 2021 Intel Corporation */
 #include "main.h"
 
@@ -320,9 +320,6 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
 	case NETDEV_DOWN:
 		iwdev->iw_status = 0;
 		fallthrough;
-	case NETDEV_UP:
-		irdma_port_ibevent(iwdev);
-		break;
 	default:
 		break;
 	}
@@ -455,6 +452,7 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
 	cqp_request->waiting = wait;
 	refcount_set(&cqp_request->refcnt, 1);
 	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+	memset(&cqp_request->info, 0, sizeof(cqp_request->info));
 
 	return cqp_request;
 }
@@ -481,9 +479,10 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
 	if (cqp_request->dynamic) {
 		kfree(cqp_request);
 	} else {
-		cqp_request->request_done = false;
+		WRITE_ONCE(cqp_request->request_done, false);
 		cqp_request->callback_fcn = NULL;
 		cqp_request->waiting = false;
+		cqp_request->pending = false;
 
 		spin_lock_irqsave(&cqp->req_lock, flags);
 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
@@ -515,7 +514,7 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
 {
 	if (cqp_request->waiting) {
 		cqp_request->compl_info.error = true;
-		cqp_request->request_done = true;
+		WRITE_ONCE(cqp_request->request_done, true);
 		wake_up(&cqp_request->waitq);
 	}
 	wait_event_timeout(cqp->remove_wq,
@@ -524,6 +523,22 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
 }
 
 /**
+ * irdma_cleanup_deferred_cqp_ops - clean-up cqp with no completions
+ * @dev: sc_dev
+ * @cqp: cqp
+ */
+static void irdma_cleanup_deferred_cqp_ops(struct irdma_sc_dev *dev,
+					   struct irdma_cqp *cqp)
+{
+	u64 scratch;
+
+	/* process all CQP requests with deferred/pending completions */
+	while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
+		irdma_free_pending_cqp_request(cqp, (struct irdma_cqp_request *)
+						    (uintptr_t)scratch);
+}
+
+/**
  * irdma_cleanup_pending_cqp_op - clean-up cqp with no
  * completions
  * @rf: RDMA PCI function
@@ -536,6 +551,8 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
 	struct cqp_cmds_info *pcmdinfo = NULL;
 	u32 i, pending_work, wqe_idx;
 
+	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+		irdma_cleanup_deferred_cqp_ops(dev, cqp);
 	pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
 	wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
 	for (i = 0; i < pending_work; i++) {
@@ -555,6 +572,26 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
 	}
 }
 
+static int irdma_get_timeout_threshold(struct irdma_sc_dev *dev)
+{
+	u16 time_s = dev->vc_caps.cqp_timeout_s;
+
+	if (!time_s)
+		return CQP_TIMEOUT_THRESHOLD;
+
+	return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
+static int irdma_get_def_timeout_threshold(struct irdma_sc_dev *dev)
+{
+	u16 time_s = dev->vc_caps.cqp_def_timeout_s;
+
+	if (!time_s)
+		return CQP_DEF_CMPL_TIMEOUT_THRESHOLD;
+
+	return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
 /**
  * irdma_wait_event - wait for completion
  * @rf: RDMA PCI function
@@ -564,20 +601,29 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
 			    struct irdma_cqp_request *cqp_request)
 {
 	struct irdma_cqp_timeout cqp_timeout = {};
+	int timeout_threshold = irdma_get_timeout_threshold(&rf->sc_dev);
 	bool cqp_error = false;
 	int err_code = 0;
 
-	cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+	cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
 	do {
 		irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
 		if (wait_event_timeout(cqp_request->waitq,
-				       cqp_request->request_done,
+				       READ_ONCE(cqp_request->request_done),
 				       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
 			break;
 
+		if (cqp_request->pending)
+			/* There was a deferred or pending completion
+			 * received for this CQP request, so we need
+			 * to wait longer than usual.
+			 */
+			timeout_threshold =
+				irdma_get_def_timeout_threshold(&rf->sc_dev);
+
 		irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
 
-		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+		if (cqp_timeout.count < timeout_threshold)
 			continue;
 
 		if (!rf->reset) {
@@ -652,6 +698,9 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
 	[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
 	[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
 	[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
+	[IRDMA_OP_SRQ_CREATE] = "Create SRQ Cmd",
+	[IRDMA_OP_SRQ_MODIFY] = "Modify SRQ Cmd",
+	[IRDMA_OP_SRQ_DESTROY] = "Destroy SRQ Cmd",
 };
 
 static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
@@ -760,6 +809,31 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp)
 	complete(&iwqp->free_qp);
 }
 
+void irdma_cq_add_ref(struct ib_cq *ibcq)
+{
+	struct irdma_cq *iwcq = to_iwcq(ibcq);
+
+	refcount_inc(&iwcq->refcnt);
+}
+
+void irdma_cq_rem_ref(struct ib_cq *ibcq)
+{
+	struct ib_device *ibdev = ibcq->device;
+	struct irdma_device *iwdev = to_iwdev(ibdev);
+	struct irdma_cq *iwcq = to_iwcq(ibcq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags);
+	if (!refcount_dec_and_test(&iwcq->refcnt)) {
+		spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+		return;
+	}
+
+	iwdev->rf->cq_table[iwcq->cq_num] = NULL;
+	spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+	complete(&iwcq->free_cq);
+}
+
 struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
 {
 	return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
@@ -908,7 +982,7 @@ void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
 
 static void irdma_terminate_timeout(struct timer_list *t)
 {
-	struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+	struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
 
 	irdma_terminate_done(qp, 1);
@@ -941,80 +1015,12 @@ void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
 	int ret;
 
 	iwqp = qp->qp_uk.back_qp;
-	ret = del_timer(&iwqp->terminate_timer);
+	ret = timer_delete(&iwqp->terminate_timer);
 	if (ret)
 		irdma_qp_rem_ref(&iwqp->ibqp);
 }
 
 /**
- * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
- * @dev: function device struct
- * @val_mem: buffer for fpm
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
-				struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
-	struct irdma_cqp_request *cqp_request;
-	struct cqp_cmds_info *cqp_info;
-	struct irdma_pci_f *rf = dev_to_rf(dev);
-	int status;
-
-	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
-	if (!cqp_request)
-		return -ENOMEM;
-
-	cqp_info = &cqp_request->info;
-	cqp_request->param = NULL;
-	cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
-	cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
-	cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
-	cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
-	cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
-	cqp_info->post_sq = 1;
-	cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
-
-	status = irdma_handle_cqp_op(rf, cqp_request);
-	irdma_put_cqp_request(&rf->cqp, cqp_request);
-
-	return status;
-}
-
-/**
- * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
- * @dev: hardware control device structure
- * @val_mem: buffer with fpm values
- * @hmc_fn_id: function id for fpm
- */
-int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
-				 struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
-{
-	struct irdma_cqp_request *cqp_request;
-	struct cqp_cmds_info *cqp_info;
-	struct irdma_pci_f *rf = dev_to_rf(dev);
-	int status;
-
-	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
-	if (!cqp_request)
-		return -ENOMEM;
-
-	cqp_info = &cqp_request->info;
-	cqp_request->param = NULL;
-	cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
-	cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
-	cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
-	cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
-	cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
-	cqp_info->post_sq = 1;
-	cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
-
-	status = irdma_handle_cqp_op(rf, cqp_request);
-	irdma_put_cqp_request(&rf->cqp, cqp_request);
-
-	return status;
-}
-
-/**
  * irdma_cqp_cq_create_cmd - create a cq for the cqp
  * @dev: device pointer
  * @cq: pointer to created cq
@@ -1063,7 +1069,6 @@ int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 
 	cqp_info = &cqp_request->info;
 	qp_info = &cqp_request->info.in.u.qp_create.info;
-	memset(qp_info, 0, sizeof(*qp_info));
 	qp_info->cq_num_valid = true;
 	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
@@ -1111,6 +1116,26 @@ static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
 	irdma_put_cqp_request(&rf->cqp, cqp_request);
 }
 
+static void irdma_free_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 qp_num)
+{
+	struct irdma_device *iwdev = iwqp->iwdev;
+	struct irdma_pci_f *rf = iwdev->rf;
+	unsigned long flags;
+
+	if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+		return;
+
+	irdma_vchnl_req_del_vport(&rf->sc_dev, iwdev->vport_id, qp_num);
+
+	if (qp_num == 1) {
+		spin_lock_irqsave(&rf->rsrc_lock, flags);
+		rf->hwqp1_rsvd = false;
+		spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+	} else if (qp_num > 2) {
+		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+	}
+}
+
 /**
  * irdma_free_qp_rsrc - free up memory resources for qp
  * @iwqp: qp ptr (user or kernel)
@@ -1119,7 +1144,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
 {
 	struct irdma_device *iwdev = iwqp->iwdev;
 	struct irdma_pci_f *rf = iwdev->rf;
-	u32 qp_num = iwqp->ibqp.qp_num;
+	u32 qp_num = iwqp->sc_qp.qp_uk.qp_id;
 
 	irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
 	irdma_dealloc_push_page(rf, &iwqp->sc_qp);
@@ -1129,8 +1154,12 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
 				   iwqp->sc_qp.user_pri);
 	}
 
-	if (qp_num > 2)
-		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+	if (iwqp->ibqp.qp_type == IB_QPT_GSI) {
+		irdma_free_gsi_qp_rsrc(iwqp, qp_num);
+	} else {
+		if (qp_num > 2)
+			irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+	}
 	dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
 			  iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
 	iwqp->q2_ctx_mem.va = NULL;
@@ -1142,6 +1171,30 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
 }
 
 /**
+ * irdma_srq_wq_destroy - send srq destroy cqp
+ * @rf: RDMA PCI function
+ * @srq: hardware control srq
+ */
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq)
+{
+	struct irdma_cqp_request *cqp_request;
+	struct cqp_cmds_info *cqp_info;
+
+	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+	if (!cqp_request)
+		return;
+
+	cqp_info = &cqp_request->info;
+	cqp_info->cqp_cmd = IRDMA_OP_SRQ_DESTROY;
+	cqp_info->post_sq = 1;
+	cqp_info->in.u.srq_destroy.srq = srq;
+	cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request;
+
+	irdma_handle_cqp_op(rf, cqp_request);
+	irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
  * irdma_cq_wq_destroy - send cq destroy cqp
  * @rf: RDMA PCI function
  * @cq: hardware control cq
@@ -1290,7 +1343,6 @@ int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.qp_destroy.qp = qp;
@@ -1320,65 +1372,17 @@ void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 }
 
 /**
- * irdma_init_hash_desc - initialize hash for crc calculation
- * @desc: cryption type
- */
-int irdma_init_hash_desc(struct shash_desc **desc)
-{
-	struct crypto_shash *tfm;
-	struct shash_desc *tdesc;
-
-	tfm = crypto_alloc_shash("crc32c", 0, 0);
-	if (IS_ERR(tfm))
-		return -EINVAL;
-
-	tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
-			GFP_KERNEL);
-	if (!tdesc) {
-		crypto_free_shash(tfm);
-		return -EINVAL;
-	}
-
-	tdesc->tfm = tfm;
-	*desc = tdesc;
-
-	return 0;
-}
-
-/**
- * irdma_free_hash_desc - free hash desc
- * @desc: to be freed
- */
-void irdma_free_hash_desc(struct shash_desc *desc)
-{
-	if (desc) {
-		crypto_free_shash(desc->tfm);
-		kfree(desc);
-	}
-}
-
-/**
  * irdma_ieq_check_mpacrc - check if mpa crc is OK
- * @desc: desc for hash
  * @addr: address of buffer for crc
  * @len: length of buffer
  * @val: value to be compared
  */
-int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
-			   u32 val)
+int irdma_ieq_check_mpacrc(const void *addr, u32 len, u32 val)
 {
-	u32 crc = 0;
-	int ret;
-	int ret_code = 0;
-
-	crypto_shash_init(desc);
-	ret = crypto_shash_update(desc, addr, len);
-	if (!ret)
-		crypto_shash_final(desc, (u8 *)&crc);
-	if (crc != val)
-		ret_code = -EINVAL;
+	if ((__force u32)cpu_to_le32(~crc32c(~0, addr, len)) != val)
+		return -EINVAL;
 
-	return ret_code;
+	return 0;
 }
 
 /**
@@ -1631,13 +1635,13 @@ int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
 static void irdma_hw_stats_timeout(struct timer_list *t)
 {
 	struct irdma_vsi_pestat *pf_devstat =
-		from_timer(pf_devstat, t, stats_timer);
+		timer_container_of(pf_devstat, t, stats_timer);
 	struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
 
-	if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
-		irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
-	else
+	if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 		irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
+	else
+		irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
 
 	mod_timer(&pf_devstat->stats_timer,
 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
@@ -1664,7 +1668,7 @@ void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
 {
 	struct irdma_vsi_pestat *devstat = vsi->pestat;
 
-	del_timer_sync(&devstat->stats_timer);
+	timer_delete_sync(&devstat->stats_timer);
 }
 
 /**
@@ -1686,164 +1690,28 @@ void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
 {
 	struct irdma_gather_stats *gather_stats =
 		pestat->gather_info.gather_stats_va;
+	const struct irdma_hw_stat_map *map = dev->hw_stats_map;
+	u16 max_stats_idx = dev->hw_attrs.max_stat_idx;
 	u32 stats_inst_offset_32;
 	u32 stats_inst_offset_64;
+	u64 new_val;
+	u16 i;
 
 	stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
-			       pestat->gather_info.stats_inst_index :
-			       pestat->hw->hmc.hmc_fn_id;
+				       pestat->gather_info.stats_inst_index :
+				       pestat->hw->hmc.hmc_fn_id;
 	stats_inst_offset_32 *= 4;
 	stats_inst_offset_64 = stats_inst_offset_32 * 2;
 
-	gather_stats->rxvlanerr =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_RXVLANERR]
-		     + stats_inst_offset_32);
-	gather_stats->ip4rxdiscard =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXDISCARD]
-		     + stats_inst_offset_32);
-	gather_stats->ip4rxtrunc =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXTRUNC]
-		     + stats_inst_offset_32);
-	gather_stats->ip4txnoroute =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE]
-		     + stats_inst_offset_32);
-	gather_stats->ip6rxdiscard =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXDISCARD]
-		     + stats_inst_offset_32);
-	gather_stats->ip6rxtrunc =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXTRUNC]
-		     + stats_inst_offset_32);
-	gather_stats->ip6txnoroute =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE]
-		     + stats_inst_offset_32);
-	gather_stats->tcprtxseg =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRTXSEG]
-		     + stats_inst_offset_32);
-	gather_stats->tcprxopterr =
-		rd32(dev->hw,
-		     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRXOPTERR]
-		     + stats_inst_offset_32);
-
-	gather_stats->ip4rxocts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXOCTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip4rxpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip4txfrag =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXFRAGS]
-		     + stats_inst_offset_64);
-	gather_stats->ip4rxmcpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip4txocts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXOCTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip4txpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip4txfrag =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXFRAGS]
-		     + stats_inst_offset_64);
-	gather_stats->ip4txmcpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6rxocts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXOCTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6rxpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6txfrags =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXFRAGS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6rxmcpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6txocts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXOCTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6txpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6txfrags =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXFRAGS]
-		     + stats_inst_offset_64);
-	gather_stats->ip6txmcpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->tcprxsegs =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPRXSEGS]
-		     + stats_inst_offset_64);
-	gather_stats->tcptxsegs =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPTXSEG]
-		     + stats_inst_offset_64);
-	gather_stats->rdmarxrds =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXRDS]
-		     + stats_inst_offset_64);
-	gather_stats->rdmarxsnds =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXSNDS]
-		     + stats_inst_offset_64);
-	gather_stats->rdmarxwrs =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXWRS]
-		     + stats_inst_offset_64);
-	gather_stats->rdmatxrds =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXRDS]
-		     + stats_inst_offset_64);
-	gather_stats->rdmatxsnds =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXSNDS]
-		     + stats_inst_offset_64);
-	gather_stats->rdmatxwrs =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXWRS]
-		     + stats_inst_offset_64);
-	gather_stats->rdmavbn =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVBND]
-		     + stats_inst_offset_64);
-	gather_stats->rdmavinv =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVINV]
-		     + stats_inst_offset_64);
-	gather_stats->udprxpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPRXPKTS]
-		     + stats_inst_offset_64);
-	gather_stats->udptxpkts =
-		rd64(dev->hw,
-		     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPTXPKTS]
-		     + stats_inst_offset_64);
+	for (i = 0; i < max_stats_idx; i++) {
+		if (map[i].bitmask <= IRDMA_MAX_STATS_32)
+			new_val = rd32(dev->hw,
+				       dev->hw_stats_regs[i] + stats_inst_offset_32);
+		else
+			new_val = rd64(dev->hw,
+				       dev->hw_stats_regs[i] + stats_inst_offset_64);
+		gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val;
+	}
 
 	irdma_process_stats(pestat);
 }
@@ -1880,7 +1748,6 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.stats_gather.info = pestat->gather_info;
@@ -1920,7 +1787,6 @@ int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = cmd;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.stats_manage.info = *stats_info;
@@ -2021,7 +1887,6 @@ int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = cmd;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.ws_node.info = *node_info;
@@ -2488,21 +2353,6 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
 
-bool irdma_cq_empty(struct irdma_cq *iwcq)
-{
-	struct irdma_cq_uk *ukcq;
-	u64 qword3;
-	__le64 *cqe;
-	u8 polarity;
-
-	ukcq = &iwcq->sc_cq.cq_uk;
-	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
-	get_64bit_val(cqe, 24, &qword3);
-	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
-
-	return polarity != ukcq->polarity;
-}
-
 void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
 {
 	struct irdma_cmpl_gen *cmpl_node;
@@ -2564,6 +2414,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
 	struct irdma_ring *sq_ring = &qp->sq_ring;
 	struct irdma_ring *rq_ring = &qp->rq_ring;
+	struct irdma_cq *iwscq = iwqp->iwscq;
+	struct irdma_cq *iwrcq = iwqp->iwrcq;
 	struct irdma_cmpl_gen *cmpl;
 	__le64 *sw_wqe;
 	u64 wqe_qword;
@@ -2571,8 +2423,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	bool compl_generated = false;
 	unsigned long flags1;
 
-	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwscq)) {
+	spin_lock_irqsave(&iwscq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2580,7 +2432,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+				spin_unlock_irqrestore(&iwscq->lock, flags1);
 				return;
 			}
 
@@ -2595,25 +2447,28 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			/* remove the SQ WR by moving SQ tail*/
 			IRDMA_RING_SET_TAIL(*sq_ring,
 					    sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
-
-			ibdev_dbg(iwqp->iwscq->ibcq.device,
+			if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
+				kfree(cmpl);
+				continue;
+			}
+			ibdev_dbg(iwscq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id);
-			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwscq->cmpl_generated);
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwscq);
+			irdma_comp_handler(iwscq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
 
-	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwrcq)) {
+	spin_lock_irqsave(&iwrcq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwrcq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2621,7 +2476,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+				spin_unlock_irqrestore(&iwrcq->lock, flags1);
 				return;
 			}
 
@@ -2633,20 +2488,20 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
 			/* remove the RQ WR by moving RQ tail */
 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
-			ibdev_dbg(iwqp->iwrcq->ibcq.device,
+			ibdev_dbg(iwrcq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id,
 				  wqe_idx);
-			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwrcq->cmpl_generated);
 
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwrcq);
+			irdma_comp_handler(iwrcq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
