Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_lib.c | 4650 |
1 file changed, 3014 insertions, 1636 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 27c3760ae5cb..15621707fbf8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2,234 +2,62 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice.h" +#include "ice_base.h" +#include "ice_flow.h" #include "ice_lib.h" +#include "ice_fltr.h" +#include "ice_dcb_lib.h" +#include "ice_type.h" +#include "ice_vsi_vlan_ops.h" /** - * ice_setup_rx_ctx - Configure a receive ring context - * @ring: The Rx ring to configure - * - * Configure the Rx descriptor ring in RLAN context. + * ice_vsi_type_str - maps VSI type enum to string equivalents + * @vsi_type: VSI type enum */ -static int ice_setup_rx_ctx(struct ice_ring *ring) +const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) { - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - u32 rxdid = ICE_RXDID_FLEX_NIC; - struct ice_rlan_ctx rlan_ctx; - u32 regval; - u16 pf_q; - int err; - - /* what is Rx queue number in global space of 2K Rx queues */ - pf_q = vsi->rxq_map[ring->q_index]; - - /* clear the context structure first */ - memset(&rlan_ctx, 0, sizeof(rlan_ctx)); - - rlan_ctx.base = ring->dma >> 7; - - rlan_ctx.qlen = ring->count; - - /* Receive Packet Data Buffer Size. - * The Packet Data Buffer Size is defined in 128 byte units. - */ - rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; - - /* use 32 byte descriptors */ - rlan_ctx.dsize = 1; - - /* Strip the Ethernet CRC bytes before the packet is posted to host - * memory. - */ - rlan_ctx.crcstrip = 1; - - /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ - rlan_ctx.l2tsel = 1; - - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; - rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; - - /* This controls whether VLAN is stripped from inner headers - * The VLAN in the inner L2 header is stripped to the receive - * descriptor if enabled by this flag. 
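The removed ice_setup_rx_ctx() above packs buffer sizes into the Rx LAN context in 128-byte units and caps rxmax at five chained buffers. A standalone sketch (not driver code) of those two conversions, assuming ICE_RLAN_CTX_DBUF_S is the 128-byte shift (7), consistent with the 128-byte units the hunk describes:

/* Sketch of the unit conversions done by the removed ice_setup_rx_ctx().
 * The shift value 7 (128 = 1 << 7) is an assumption for illustration.
 */
#include <stdio.h>

#define ICE_RLAN_CTX_DBUF_S     7 /* data buffer size is in 128-byte units */
#define ICE_MAX_CHAINED_RX_BUFS 5 /* rxmax must not exceed 5 x DBUF */

static unsigned int rlan_rxmax(unsigned int max_frame, unsigned int buf_len)
{
	unsigned int cap = ICE_MAX_CHAINED_RX_BUFS * buf_len;

	/* max packet size is the smaller of the MTU-derived frame size
	 * and what five chained data buffers can hold
	 */
	return max_frame < cap ? max_frame : cap;
}

int main(void)
{
	unsigned int buf_len = 2048;

	printf("dbuf=%u rxmax=%u\n", buf_len >> ICE_RLAN_CTX_DBUF_S,
	       rlan_rxmax(9728, buf_len));
	return 0;
}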
- */ - rlan_ctx.showiv = 0; - - /* Max packet size for this queue - must not be set to a larger value - * than 5 x DBUF - */ - rlan_ctx.rxmax = min_t(u16, vsi->max_frame, - ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); - - /* Rx queue threshold in units of 64 */ - rlan_ctx.lrxqthresh = 1; - - /* Enable Flexible Descriptors in the queue context which - * allows this driver to select a specific receive descriptor format - */ - if (vsi->type != ICE_VSI_VF) { - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & - QRXFLXP_CNTXT_RXDID_IDX_M; - - /* increasing context priority to pick up profile id; - * default is 0x01; setting to 0x03 to ensure profile - * is programming if prev context is of same priority - */ - regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & - QRXFLXP_CNTXT_RXDID_PRIO_M; - - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); - } - - /* Absolute queue number out of 2K needs to be passed */ - err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); - if (err) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", - pf_q, err); - return -EIO; - } - - if (vsi->type == ICE_VSI_VF) - return 0; - - /* init queue specific tail register */ - ring->tail = hw->hw_addr + QRX_TAIL(pf_q); - writel(0, ring->tail); - ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); - - return 0; -} - -/** - * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance - * @ring: The Tx ring to configure - * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized - * @pf_q: queue index in the PF space - * - * Configure the Tx descriptor ring in TLAN context. - */ -static void -ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) -{ - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - - tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; - - tlan_ctx->port_num = vsi->port_info->lport; - - /* Transmit Queue Length */ - tlan_ctx->qlen = ring->count; - - /* PF number */ - tlan_ctx->pf_num = hw->pf_id; - - /* queue belongs to a specific VSI type - * VF / VM index should be programmed per vmvf_type setting: - * for vmvf_type = VF, it is VF number between 0-256 - * for vmvf_type = VM, it is VM number between 0-767 - * for PF or EMP this field should be set to zero - */ - switch (vsi->type) { + switch (vsi_type) { case ICE_VSI_PF: - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; - break; + return "ICE_VSI_PF"; case ICE_VSI_VF: - /* Firmware expects vmvf_num to be absolute VF id */ - tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; - break; + return "ICE_VSI_VF"; + case ICE_VSI_SF: + return "ICE_VSI_SF"; + case ICE_VSI_CTRL: + return "ICE_VSI_CTRL"; + case ICE_VSI_CHNL: + return "ICE_VSI_CHNL"; + case ICE_VSI_LB: + return "ICE_VSI_LB"; default: - return; - } - - /* make sure the context is associated with the right VSI */ - tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); - - tlan_ctx->tso_ena = ICE_TX_LEGACY; - tlan_ctx->tso_qnum = pf_q; - - /* Legacy or Advanced Host Interface: - * 0: Advanced Host Interface - * 1: Legacy Host Interface - */ - tlan_ctx->legacy_int = ICE_TX_LEGACY; -} - -/** - * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled - * @pf: the PF being configured - * @pf_q: the PF queue - * @ena: enable or disable state of the queue - * - * This routine will wait for the given Rx queue of the PF to reach the - * enabled or disabled state. 
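ice_pf_rxq_wait(), removed here, is a bounded poll: re-read a status register until it reports the requested enable state or the retry budget runs out. A standalone C sketch of the idiom, with hw_read() standing in for rd32() and the bit value chosen only for illustration:

/* Sketch of the bounded-poll loop shape used by ice_pf_rxq_wait(). */
#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

#define Q_WAIT_MAX_RETRY 5
#define QENA_STAT_BIT    0x4u /* placeholder for QRX_CTRL_QENA_STAT_M */

static unsigned int fake_reg = QENA_STAT_BIT; /* pretend queue is enabled */

static unsigned int hw_read(void) { return fake_reg; }

static int rxq_wait(bool ena)
{
	int i;

	for (i = 0; i < Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(hw_read() & QENA_STAT_BIT))
			return 0;
		/* the driver sleeps between reads: usleep_range(20, 40) */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait(enable) -> %d\n", rxq_wait(true));
	return 0;
}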
- * Returns -ETIMEDOUT in case of failing to reach the requested state after - * multiple retries; else will return 0 in case of success. - */ -static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) -{ - int i; - - for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { - u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); - - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - break; - - usleep_range(20, 40); + return "unknown"; } - if (i >= ICE_Q_WAIT_MAX_RETRY) - return -ETIMEDOUT; - - return 0; } /** - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings * @vsi: the VSI being configured * @ena: start or stop the Rx rings + * + * First enable/disable all of the Rx rings, flush any remaining writes, and + * then verify that they have all been enabled/disabled successfully. This will + * let all of the register writes complete when enabling/disabling the Rx rings + * before waiting for the change in hardware to complete. */ -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int i, j, ret = 0; - - for (i = 0; i < vsi->num_rxq; i++) { - int pf_q = vsi->rxq_map[i]; - u32 rx_reg; - - for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { - rx_reg = rd32(hw, QRX_CTRL(pf_q)); - if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == - ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) - break; - usleep_range(1000, 2000); - } + int ret = 0; + u16 i; - /* Skip if the queue is already in the requested state */ - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - continue; + ice_for_each_rxq(vsi, i) + ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false); - /* turn on/off the queue */ - if (ena) - rx_reg |= QRX_CTRL_QENA_REQ_M; - else - rx_reg &= ~QRX_CTRL_QENA_REQ_M; - wr32(hw, QRX_CTRL(pf_q), rx_reg); + ice_flush(&vsi->back->hw); - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? "en" : "dis")); + ice_for_each_rxq(vsi, i) { + ret = ice_vsi_wait_one_rx_ring(vsi, ena, i); + if (ret) break; - } } return ret; @@ -238,78 +66,186 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) /** * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI * @vsi: VSI pointer - * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
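The replacement ice_vsi_ctrl_all_rx_rings() above changes the ordering: toggle every ring first so the register writes overlap, flush the posted writes once, and only then poll each queue, instead of write-then-wait per queue. A hedged sketch of that batching, where all helpers are stand-ins rather than driver API:

/* Sketch of the write-all / flush / verify-all ordering introduced by
 * ice_vsi_ctrl_all_rx_rings().
 */
#include <stdio.h>

#define NUM_Q 4

static void ring_ctrl(int q, int ena)  { printf("ctrl q%d=%d\n", q, ena); }
static void flush_writes(void)         { printf("flush\n"); }
static int  ring_wait(int q, int ena)  { (void)q; (void)ena; return 0; }

static int ctrl_all_rx_rings(int ena)
{
	int i, ret = 0;

	for (i = 0; i < NUM_Q; i++)
		ring_ctrl(i, ena);        /* post all writes first */

	flush_writes();                   /* one flush for the whole batch */

	for (i = 0; i < NUM_Q; i++) {
		ret = ring_wait(i, ena);  /* then wait on each queue */
		if (ret)
			break;
	}
	return ret;
}

int main(void) { return ctrl_all_rx_rings(1); }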
* * On error: returns error code (negative) * On success: returns 0 */ -static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) +static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; + struct device *dev; + + dev = ice_pf_to_dev(pf); + if (vsi->type == ICE_VSI_CHNL) + return 0; /* allocate memory for both Tx and Rx ring pointers */ - vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, - sizeof(struct ice_ring *), GFP_KERNEL); + vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, + sizeof(*vsi->tx_rings), GFP_KERNEL); if (!vsi->tx_rings) - goto err_txrings; + return -ENOMEM; - vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, - sizeof(struct ice_ring *), GFP_KERNEL); + vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, + sizeof(*vsi->rx_rings), GFP_KERNEL); if (!vsi->rx_rings) - goto err_rxrings; + goto err_rings; - if (alloc_qvectors) { - /* allocate memory for q_vector pointers */ - vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, - vsi->num_q_vectors, - sizeof(struct ice_q_vector *), - GFP_KERNEL); - if (!vsi->q_vectors) - goto err_vectors; - } + /* txq_map needs to have enough space to track both Tx (stack) rings + * and XDP rings; at this point vsi->num_xdp_txq might not be set, + * so use num_possible_cpus() as we want to always provide XDP ring + * per CPU, regardless of queue count settings from user that might + * have come from ethtool's set_channels() callback; + */ + vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), + sizeof(*vsi->txq_map), GFP_KERNEL); + + if (!vsi->txq_map) + goto err_txq_map; + + vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, + sizeof(*vsi->rxq_map), GFP_KERNEL); + if (!vsi->rxq_map) + goto err_rxq_map; + + /* There is no need to allocate q_vectors for a loopback VSI. */ + if (vsi->type == ICE_VSI_LB) + return 0; + + /* allocate memory for q_vector pointers */ + vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, + sizeof(*vsi->q_vectors), GFP_KERNEL); + if (!vsi->q_vectors) + goto err_vectors; return 0; err_vectors: - devm_kfree(&pf->pdev->dev, vsi->rx_rings); -err_rxrings: - devm_kfree(&pf->pdev->dev, vsi->tx_rings); -err_txrings: + devm_kfree(dev, vsi->rxq_map); +err_rxq_map: + devm_kfree(dev, vsi->txq_map); +err_txq_map: + devm_kfree(dev, vsi->rx_rings); +err_rings: + devm_kfree(dev, vsi->tx_rings); return -ENOMEM; } /** - * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI + * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI + * @vsi: the VSI being configured + */ +static void ice_vsi_set_num_desc(struct ice_vsi *vsi) +{ + switch (vsi->type) { + case ICE_VSI_PF: + case ICE_VSI_SF: + case ICE_VSI_CTRL: + case ICE_VSI_LB: + /* a user could change the values of num_[tr]x_desc using + * ethtool -G so we should keep those values instead of + * overwriting them with the defaults. 
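The reworked ice_vsi_alloc_arrays() below allocates several arrays in stages, oversizes txq_map by num_possible_cpus() so XDP can claim one Tx queue per CPU, and unwinds in reverse order through goto labels on failure. A standalone sketch of that allocation/unwind shape, with plain calloc()/free() standing in for devm_kcalloc()/devm_kfree():

/* Sketch of staged allocation with reverse-order goto unwind. */
#include <stdlib.h>

struct vsi_arrays {
	void *tx_rings, *rx_rings, *txq_map, *rxq_map;
};

static int alloc_arrays(struct vsi_arrays *a, size_t ntx, size_t nrx,
			size_t ncpus)
{
	a->tx_rings = calloc(ntx, sizeof(void *));
	if (!a->tx_rings)
		return -1;
	a->rx_rings = calloc(nrx, sizeof(void *));
	if (!a->rx_rings)
		goto err_rings;
	/* txq_map is oversized so XDP gets one Tx queue per possible CPU */
	a->txq_map = calloc(ntx + ncpus, sizeof(unsigned short));
	if (!a->txq_map)
		goto err_txq_map;
	a->rxq_map = calloc(nrx, sizeof(unsigned short));
	if (!a->rxq_map)
		goto err_rxq_map;
	return 0;

err_rxq_map:
	free(a->txq_map);   /* each label frees exactly what succeeded */
err_txq_map:
	free(a->rx_rings);
err_rings:
	free(a->tx_rings);
	return -1;
}

int main(void)
{
	struct vsi_arrays a;

	return alloc_arrays(&a, 8, 8, 4);
}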
+ */ + if (!vsi->num_rx_desc) + vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; + if (!vsi->num_tx_desc) + vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; + break; + default: + dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", + vsi->type); + break; + } +} + +static u16 ice_get_rxq_count(struct ice_pf *pf) +{ + return min(ice_get_avail_rxq_count(pf), num_online_cpus()); +} + +static u16 ice_get_txq_count(struct ice_pf *pf) +{ + return min(ice_get_avail_txq_count(pf), num_online_cpus()); +} + +/** + * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured * * Return 0 on success and a negative value on error */ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) { + enum ice_vsi_type vsi_type = vsi->type; struct ice_pf *pf = vsi->back; + struct ice_vf *vf = vsi->vf; - switch (vsi->type) { + if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) + return; + + switch (vsi_type) { case ICE_VSI_PF: - vsi->alloc_txq = pf->num_lan_tx; - vsi->alloc_rxq = pf->num_lan_rx; - vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); + if (vsi->req_txq) { + vsi->alloc_txq = vsi->req_txq; + vsi->num_txq = vsi->req_txq; + } else { + vsi->alloc_txq = ice_get_txq_count(pf); + } + + pf->num_lan_tx = vsi->alloc_txq; + + /* only 1 Rx queue unless RSS is enabled */ + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + vsi->alloc_rxq = 1; + } else { + if (vsi->req_rxq) { + vsi->alloc_rxq = vsi->req_rxq; + vsi->num_rxq = vsi->req_rxq; + } else { + vsi->alloc_rxq = ice_get_rxq_count(pf); + } + } + + pf->num_lan_rx = vsi->alloc_rxq; + + vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq); + break; + case ICE_VSI_SF: + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; + vsi->num_q_vectors = 1; + vsi->irq_dyn_alloc = true; break; case ICE_VSI_VF: - vsi->alloc_txq = pf->num_vf_qps; - vsi->alloc_rxq = pf->num_vf_qps; - /* pf->num_vf_msix includes (VF miscellaneous vector + + if (vf->num_req_qs) + vf->num_vf_qs = vf->num_req_qs; + vsi->alloc_txq = vf->num_vf_qs; + vsi->alloc_rxq = vf->num_vf_qs; + /* pf->vfs.num_msix_per includes (VF miscellaneous vector + * data queue interrupts). 
Since vsi->num_q_vectors is number - * of queues vectors, subtract 1 from the original vector - * count + * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the + * original vector count */ - vsi->num_q_vectors = pf->num_vf_msix - 1; + vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; + break; + case ICE_VSI_CTRL: + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; + vsi->num_q_vectors = 1; + break; + case ICE_VSI_CHNL: + vsi->alloc_txq = 0; + vsi->alloc_rxq = 0; + break; + case ICE_VSI_LB: + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; break; default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); + dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type); break; } + + ice_vsi_set_num_desc(vsi); } /** @@ -342,90 +278,216 @@ static int ice_get_free_slot(void *array, int size, int curr) } /** - * ice_vsi_delete - delete a VSI from the switch + * ice_vsi_delete_from_hw - delete a VSI from the switch * @vsi: pointer to VSI being removed */ -void ice_vsi_delete(struct ice_vsi *vsi) +static void ice_vsi_delete_from_hw(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - struct ice_vsi_ctx ctxt; - enum ice_status status; + struct ice_vsi_ctx *ctxt; + int status; + + ice_fltr_remove_all(vsi); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return; if (vsi->type == ICE_VSI_VF) - ctxt.vf_num = vsi->vf_id; - ctxt.vsi_num = vsi->vsi_num; + ctxt->vf_num = vsi->vf->vf_id; + ctxt->vsi_num = vsi->vsi_num; - memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); + memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); - status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); + status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) - dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", - vsi->vsi_num); + dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", + vsi->vsi_num, status); + + kfree(ctxt); } /** - * ice_vsi_free_arrays - clean up VSI resources + * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI * @vsi: pointer to VSI being cleared - * @free_qvectors: bool to specify if q_vectors should be deallocated */ -static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors) +static void ice_vsi_free_arrays(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; + struct device *dev; + + dev = ice_pf_to_dev(pf); /* free the ring and vector containers */ - if (free_qvectors && vsi->q_vectors) { - devm_kfree(&pf->pdev->dev, vsi->q_vectors); - vsi->q_vectors = NULL; + devm_kfree(dev, vsi->q_vectors); + vsi->q_vectors = NULL; + devm_kfree(dev, vsi->tx_rings); + vsi->tx_rings = NULL; + devm_kfree(dev, vsi->rx_rings); + vsi->rx_rings = NULL; + devm_kfree(dev, vsi->txq_map); + vsi->txq_map = NULL; + devm_kfree(dev, vsi->rxq_map); + vsi->rxq_map = NULL; +} + +/** + * ice_vsi_free_stats - Free the ring statistics structures + * @vsi: VSI pointer + */ +static void ice_vsi_free_stats(struct ice_vsi *vsi) +{ + struct ice_vsi_stats *vsi_stat; + struct ice_pf *pf = vsi->back; + int i; + + if (vsi->type == ICE_VSI_CHNL) + return; + if (!pf->vsi_stats) + return; + + vsi_stat = pf->vsi_stats[vsi->idx]; + if (!vsi_stat) + return; + + ice_for_each_alloc_txq(vsi, i) { + if (vsi_stat->tx_ring_stats[i]) { + kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); + WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); + } } - if (vsi->tx_rings) { - devm_kfree(&pf->pdev->dev, vsi->tx_rings); - vsi->tx_rings = NULL; + + ice_for_each_alloc_rxq(vsi, i) { + if (vsi_stat->rx_ring_stats[i]) { + 
kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); + WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); + } } - if (vsi->rx_rings) { - devm_kfree(&pf->pdev->dev, vsi->rx_rings); - vsi->rx_rings = NULL; + + kfree(vsi_stat->tx_ring_stats); + kfree(vsi_stat->rx_ring_stats); + kfree(vsi_stat); + pf->vsi_stats[vsi->idx] = NULL; +} + +/** + * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI + * @vsi: VSI which is having stats allocated + */ +static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) +{ + struct ice_ring_stats **tx_ring_stats; + struct ice_ring_stats **rx_ring_stats; + struct ice_vsi_stats *vsi_stats; + struct ice_pf *pf = vsi->back; + u16 i; + + vsi_stats = pf->vsi_stats[vsi->idx]; + tx_ring_stats = vsi_stats->tx_ring_stats; + rx_ring_stats = vsi_stats->rx_ring_stats; + + /* Allocate Tx ring stats */ + ice_for_each_alloc_txq(vsi, i) { + struct ice_ring_stats *ring_stats; + struct ice_tx_ring *ring; + + ring = vsi->tx_rings[i]; + ring_stats = tx_ring_stats[i]; + + if (!ring_stats) { + ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); + if (!ring_stats) + goto err_out; + + WRITE_ONCE(tx_ring_stats[i], ring_stats); + } + + ring->ring_stats = ring_stats; } + + /* Allocate Rx ring stats */ + ice_for_each_alloc_rxq(vsi, i) { + struct ice_ring_stats *ring_stats; + struct ice_rx_ring *ring; + + ring = vsi->rx_rings[i]; + ring_stats = rx_ring_stats[i]; + + if (!ring_stats) { + ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); + if (!ring_stats) + goto err_out; + + WRITE_ONCE(rx_ring_stats[i], ring_stats); + } + + ring->ring_stats = ring_stats; + } + + return 0; + +err_out: + ice_vsi_free_stats(vsi); + return -ENOMEM; } /** - * ice_vsi_clear - clean up and deallocate the provided VSI + * ice_vsi_free - clean up and deallocate the provided VSI * @vsi: pointer to VSI being cleared * * This deallocates the VSI's queue resources, removes it from the PF's * VSI array if necessary, and deallocates the VSI - * - * Returns 0 on success, negative on failure */ -int ice_vsi_clear(struct ice_vsi *vsi) +void ice_vsi_free(struct ice_vsi *vsi) { struct ice_pf *pf = NULL; + struct device *dev; - if (!vsi) - return 0; - - if (!vsi->back) - return -EINVAL; + if (!vsi || !vsi->back) + return; pf = vsi->back; + dev = ice_pf_to_dev(pf); if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { - dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", - vsi->idx); - return -EINVAL; + dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); + return; } mutex_lock(&pf->sw_mutex); /* updates the PF for this cleared VSI */ pf->vsi[vsi->idx] = NULL; - if (vsi->idx < pf->next_vsi) - pf->next_vsi = vsi->idx; + pf->next_vsi = vsi->idx; - ice_vsi_free_arrays(vsi, true); + ice_vsi_free_stats(vsi); + ice_vsi_free_arrays(vsi); + mutex_destroy(&vsi->xdp_state_lock); mutex_unlock(&pf->sw_mutex); - devm_kfree(&pf->pdev->dev, vsi); + devm_kfree(dev, vsi); +} - return 0; +void ice_vsi_delete(struct ice_vsi *vsi) +{ + ice_vsi_delete_from_hw(vsi); + ice_vsi_free(vsi); +} + +/** + * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + + if (!q_vector->tx.tx_ring) + return IRQ_HANDLED; + + ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring); + ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring); + + return IRQ_HANDLED; } /** @@ -437,23 +499,121 @@ static irqreturn_t ice_msix_clean_rings(int 
__always_unused irq, void *data) { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - if (!q_vector->tx.ring && !q_vector->rx.ring) + if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) return IRQ_HANDLED; + q_vector->total_events++; + napi_schedule(&q_vector->napi); return IRQ_HANDLED; } /** + * ice_vsi_alloc_stat_arrays - Allocate statistics arrays + * @vsi: VSI pointer + */ +static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi) +{ + struct ice_vsi_stats *vsi_stat; + struct ice_pf *pf = vsi->back; + + if (vsi->type == ICE_VSI_CHNL) + return 0; + if (!pf->vsi_stats) + return -ENOENT; + + if (pf->vsi_stats[vsi->idx]) + /* realloc will happen in rebuild path */ + return 0; + + vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL); + if (!vsi_stat) + return -ENOMEM; + + vsi_stat->tx_ring_stats = + kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), + GFP_KERNEL); + if (!vsi_stat->tx_ring_stats) + goto err_alloc_tx; + + vsi_stat->rx_ring_stats = + kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), + GFP_KERNEL); + if (!vsi_stat->rx_ring_stats) + goto err_alloc_rx; + + pf->vsi_stats[vsi->idx] = vsi_stat; + + return 0; + +err_alloc_rx: + kfree(vsi_stat->rx_ring_stats); +err_alloc_tx: + kfree(vsi_stat->tx_ring_stats); + kfree(vsi_stat); + pf->vsi_stats[vsi->idx] = NULL; + return -ENOMEM; +} + +/** + * ice_vsi_alloc_def - set default values for already allocated VSI + * @vsi: ptr to VSI + * @ch: ptr to channel + */ +static int +ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) +{ + if (vsi->type != ICE_VSI_CHNL) { + ice_vsi_set_num_qs(vsi); + if (ice_vsi_alloc_arrays(vsi)) + return -ENOMEM; + } + + vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev); + + switch (vsi->type) { + case ICE_VSI_PF: + case ICE_VSI_SF: + /* Setup default MSIX irq handler for VSI */ + vsi->irq_handler = ice_msix_clean_rings; + break; + case ICE_VSI_CTRL: + /* Setup ctrl VSI MSIX irq handler */ + vsi->irq_handler = ice_msix_clean_ctrl_vsi; + break; + case ICE_VSI_CHNL: + if (!ch) + return -EINVAL; + + vsi->num_rxq = ch->num_rxq; + vsi->num_txq = ch->num_txq; + vsi->next_base_q = ch->base_q; + break; + case ICE_VSI_VF: + case ICE_VSI_LB: + break; + default: + ice_vsi_free_arrays(vsi); + return -EINVAL; + } + + return 0; +} + +/** * ice_vsi_alloc - Allocates the next available struct VSI in the PF * @pf: board private structure - * @type: type of VSI + * + * Reserves a VSI index from the PF and allocates an empty VSI structure + * without a type. The VSI structure must later be initialized by calling + * ice_vsi_cfg(). * * returns a pointer to a VSI on success, NULL on failure. 
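ice_vsi_alloc() below claims the slot cached in pf->next_vsi and then recomputes it with ice_get_free_slot(). A simplified standalone model of that bookkeeping (the driver first tries curr + 1 and otherwise scans the whole array; NO_SLOT stands in for ICE_NO_VSI):

/* Sketch of the next-free-slot bookkeeping around ice_vsi_alloc(). */
#include <stdio.h>

#define MAX_VSI 8
#define NO_SLOT -1

static void *vsi[MAX_VSI];

static int get_free_slot(int curr)
{
	int i;

	if (curr < MAX_VSI - 1 && !vsi[curr + 1])
		return curr + 1;         /* fast path: next slot is free */
	for (i = 0; i < MAX_VSI; i++)    /* otherwise scan from the start */
		if (!vsi[i])
			return i;
	return NO_SLOT;
}

int main(void)
{
	int next = 0;                    /* models pf->next_vsi */

	vsi[next] = &vsi;                /* fill slot and note the index */
	next = get_free_slot(next);      /* prepare for the next allocation */
	printf("next_vsi=%d\n", next);
	return 0;
}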
*/ -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) +struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; /* Need to protect the allocation of the VSIs at the PF level */ @@ -464,136 +624,122 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) * is available to be populated */ if (pf->next_vsi == ICE_NO_VSI) { - dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); + dev_dbg(dev, "out of VSI slots!\n"); goto unlock_pf; } - vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); + vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); if (!vsi) goto unlock_pf; - vsi->type = type; vsi->back = pf; - set_bit(__ICE_DOWN, vsi->state); - vsi->idx = pf->next_vsi; - vsi->work_lmt = ICE_DFLT_IRQ_WORK; - - ice_vsi_set_num_qs(vsi); - - switch (vsi->type) { - case ICE_VSI_PF: - if (ice_vsi_alloc_arrays(vsi, true)) - goto err_rings; + set_bit(ICE_VSI_DOWN, vsi->state); - /* Setup default MSIX irq handler for VSI */ - vsi->irq_handler = ice_msix_clean_rings; - break; - case ICE_VSI_VF: - if (ice_vsi_alloc_arrays(vsi, true)) - goto err_rings; - break; - default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); - goto unlock_pf; - } - - /* fill VSI slot in the PF struct */ + /* fill slot and make note of the index */ + vsi->idx = pf->next_vsi; pf->vsi[pf->next_vsi] = vsi; /* prepare pf->next_vsi for next use */ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, pf->next_vsi); - goto unlock_pf; -err_rings: - devm_kfree(&pf->pdev->dev, vsi); - vsi = NULL; + mutex_init(&vsi->xdp_state_lock); + unlock_pf: mutex_unlock(&pf->sw_mutex); return vsi; } /** - * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI - * @qs_cfg: gathered variables needed for PF->VSI queues assignment + * ice_alloc_fd_res - Allocate FD resource for a VSI + * @vsi: pointer to the ice_vsi + * + * This allocates the FD resources * - * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap + * Returns 0 on success, -EPERM on no-op or -EIO on failure */ -static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) +static int ice_alloc_fd_res(struct ice_vsi *vsi) { - int offset, i; + struct ice_pf *pf = vsi->back; + u32 g_val, b_val; - mutex_lock(qs_cfg->qs_mutex); - offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, - 0, qs_cfg->q_count, 0); - if (offset >= qs_cfg->pf_map_size) { - mutex_unlock(qs_cfg->qs_mutex); - return -ENOMEM; - } + /* Flow Director filters are only allocated/assigned to the PF VSI or + * CHNL VSI which passes the traffic. 
The CTRL VSI is only used to + * add/delete filters so resources are not allocated to it + */ + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) + return -EPERM; - bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); - for (i = 0; i < qs_cfg->q_count; i++) - qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; - mutex_unlock(qs_cfg->qs_mutex); + if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || + vsi->type == ICE_VSI_CHNL)) + return -EPERM; - return 0; -} + /* FD filters from guaranteed pool per VSI */ + g_val = pf->hw.func_caps.fd_fltr_guar; + if (!g_val) + return -EPERM; -/** - * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI - * @qs_cfg: gathered variables needed for PF->VSI queues assignment - * - * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap - */ -static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) -{ - int i, index = 0; + /* FD filters from best effort pool */ + b_val = pf->hw.func_caps.fd_fltr_best_effort; + if (!b_val) + return -EPERM; - mutex_lock(qs_cfg->qs_mutex); - for (i = 0; i < qs_cfg->q_count; i++) { - index = find_next_zero_bit(qs_cfg->pf_map, - qs_cfg->pf_map_size, index); - if (index >= qs_cfg->pf_map_size) - goto err_scatter; - set_bit(index, qs_cfg->pf_map); - qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; - } - mutex_unlock(qs_cfg->qs_mutex); + /* PF main VSI gets only 64 FD resources from guaranteed pool + * when ADQ is configured. + */ +#define ICE_PF_VSI_GFLTR 64 - return 0; -err_scatter: - for (index = 0; index < i; index++) { - clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); - qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; - } - mutex_unlock(qs_cfg->qs_mutex); + /* determine FD filter resources per VSI from shared(best effort) and + * dedicated pool + */ + if (vsi->type == ICE_VSI_PF) { + vsi->num_gfltr = g_val; + /* if MQPRIO is configured, main VSI doesn't get all FD + * resources from guaranteed pool. PF VSI gets 64 FD resources + */ + if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { + if (g_val < ICE_PF_VSI_GFLTR) + return -EPERM; + /* allow bare minimum entries for PF VSI */ + vsi->num_gfltr = ICE_PF_VSI_GFLTR; + } - return -ENOMEM; -} + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + } else if (vsi->type == ICE_VSI_VF) { + vsi->num_gfltr = 0; -/** - * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI - * @qs_cfg: gathered variables needed for PF->VSI queues assignment - * - * This is an internal function for assigning queues from the PF to VSI and - * initially tries to find contiguous space. If it is not successful to find - * contiguous space, then it tries with the scatter approach. 
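The contiguous-then-scatter strategy described in this removed comment survives in __ice_vsi_get_qs(). A self-contained sketch of the idea, using a single 32-bit word as the PF queue bitmap (the driver uses bitmap_find_next_zero_area()/find_next_zero_bit() over a real bitmap):

/* Sketch: grab a contiguous run of free queues, else take scattered ones. */
#include <stdio.h>

#define PF_QUEUES 32

static unsigned int pf_map = 0x0000f00fu; /* set bits = busy queues */

static int get_contig(int count)
{
	int start, i;

	for (start = 0; start + count <= PF_QUEUES; start++) {
		for (i = 0; i < count; i++)
			if (pf_map & (1u << (start + i)))
				break;
		if (i == count) {
			pf_map |= ((1u << count) - 1) << start;
			return start; /* first queue of the run */
		}
	}
	return -1;
}

static int get_scattered(int count, int *out)
{
	int i, got = 0;

	for (i = 0; i < PF_QUEUES && got < count; i++)
		if (!(pf_map & (1u << i))) {
			pf_map |= 1u << i;
			out[got++] = i;
		}
	return got == count ? 0 : -1;
}

int main(void)
{
	int map[8];

	if (get_contig(8) < 0 && get_scattered(8, map))
		return 1;
	printf("map=%#x\n", pf_map);
	return 0;
}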
- * - * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap - */ -static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) -{ - int ret = 0; + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + } else { + struct ice_vsi *main_vsi; + int numtc; - ret = __ice_vsi_get_qs_contig(qs_cfg); - if (ret) { - /* contig failed, so try with scatter approach */ - qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; - qs_cfg->q_count = min_t(u16, qs_cfg->q_count, - qs_cfg->scatter_count); - ret = __ice_vsi_get_qs_sc(qs_cfg); + main_vsi = ice_get_main_vsi(pf); + if (!main_vsi) + return -EPERM; + + if (!main_vsi->all_numtc) + return -EINVAL; + + /* figure out ADQ numtc */ + numtc = main_vsi->all_numtc - ICE_CHNL_START_TC; + + /* only one TC but still asking resources for channels, + * invalid config + */ + if (numtc < ICE_CHNL_START_TC) + return -EPERM; + + g_val -= ICE_PF_VSI_GFLTR; + /* channel VSIs gets equal share from guaranteed pool */ + vsi->num_gfltr = g_val / numtc; + + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; } - return ret; + + return 0; } /** @@ -608,52 +754,58 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) struct ice_qs_cfg tx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_txqs, - .pf_map_size = ICE_MAX_TXQS, + .pf_map_size = pf->max_pf_txqs, .q_count = vsi->alloc_txq, .scatter_count = ICE_MAX_SCATTER_TXQS, .vsi_map = vsi->txq_map, .vsi_map_offset = 0, - .mapping_mode = vsi->tx_mapping_mode + .mapping_mode = ICE_VSI_MAP_CONTIG }; struct ice_qs_cfg rx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_rxqs, - .pf_map_size = ICE_MAX_RXQS, + .pf_map_size = pf->max_pf_rxqs, .q_count = vsi->alloc_rxq, .scatter_count = ICE_MAX_SCATTER_RXQS, .vsi_map = vsi->rxq_map, .vsi_map_offset = 0, - .mapping_mode = vsi->rx_mapping_mode + .mapping_mode = ICE_VSI_MAP_CONTIG }; - int ret = 0; + int ret; - vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; - vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; + if (vsi->type == ICE_VSI_CHNL) + return 0; ret = __ice_vsi_get_qs(&tx_qs_cfg); - if (!ret) - ret = __ice_vsi_get_qs(&rx_qs_cfg); + if (ret) + return ret; + vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; - return ret; + ret = __ice_vsi_get_qs(&rx_qs_cfg); + if (ret) + return ret; + vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; + + return 0; } /** * ice_vsi_put_qs - Release queues from VSI to PF * @vsi: the VSI that is going to release queues */ -void ice_vsi_put_qs(struct ice_vsi *vsi) +static void ice_vsi_put_qs(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int i; mutex_lock(&pf->avail_q_mutex); - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_alloc_txq(vsi, i) { clear_bit(vsi->txq_map[i], pf->avail_txqs); vsi->txq_map[i] = ICE_INVAL_Q_INDEX; } - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_alloc_rxq(vsi, i) { clear_bit(vsi->rxq_map[i], pf->avail_rxqs); vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; } @@ -662,19 +814,72 @@ void ice_vsi_put_qs(struct ice_vsi *vsi) } /** - * ice_rss_clean - Delete RSS related VSI structures that hold user inputs + * ice_is_safe_mode + * @pf: pointer to the PF struct + * + * returns true if driver is in safe mode, false otherwise + */ +bool ice_is_safe_mode(struct ice_pf *pf) +{ + return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); +} + +/** + * ice_is_rdma_ena + * @pf: pointer to the PF struct + * + * returns true if RDMA is currently supported, false otherwise + */ +bool ice_is_rdma_ena(struct ice_pf *pf) +{ + union devlink_param_value value; + int err; + + err = 
devl_param_driverinit_value_get(priv_to_devlink(pf), + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + &value); + return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool; +} + +/** + * ice_vsi_clean_rss_flow_fld - Delete RSS configuration + * @vsi: the VSI being cleaned up + * + * This function deletes RSS input set for all flows that were configured + * for this VSI + */ +static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int status; + + if (ice_is_safe_mode(pf)) + return; + + status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); + if (status) + dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); +} + +/** + * ice_rss_clean - Delete RSS related VSI structures and configuration * @vsi: the VSI being removed */ static void ice_rss_clean(struct ice_vsi *vsi) { - struct ice_pf *pf; + struct ice_pf *pf = vsi->back; + struct device *dev; - pf = vsi->back; + dev = ice_pf_to_dev(pf); - if (vsi->rss_hkey_user) - devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); - if (vsi->rss_lut_user) - devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); + devm_kfree(dev, vsi->rss_hkey_user); + devm_kfree(dev, vsi->rss_lut_user); + + ice_vsi_clean_rss_flow_fld(vsi); + /* remove RSS replay list */ + if (!ice_is_safe_mode(pf)) + ice_rem_vsi_rss_list(&pf->hw, vsi->idx); } /** @@ -685,6 +890,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) { struct ice_hw_common_caps *cap; struct ice_pf *pf = vsi->back; + u16 max_rss_size; if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { vsi->rss_size = 1; @@ -692,37 +898,49 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) } cap = &pf->hw.func_caps.common_cap; + max_rss_size = BIT(cap->rss_table_entry_width); switch (vsi->type) { + case ICE_VSI_CHNL: case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ - vsi->rss_table_size = cap->rss_table_size; - vsi->rss_size = min_t(int, num_online_cpus(), - BIT(cap->rss_table_entry_width)); - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; + vsi->rss_table_size = (u16)cap->rss_table_size; + if (vsi->type == ICE_VSI_CHNL) + vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); + else + vsi->rss_size = min_t(u16, num_online_cpus(), + max_rss_size); + vsi->rss_lut_type = ICE_LUT_PF; + break; + case ICE_VSI_SF: + vsi->rss_table_size = ICE_LUT_VSI_SIZE; + vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); + vsi->rss_lut_type = ICE_LUT_VSI; break; case ICE_VSI_VF: - /* VF VSI will gets a small RSS table - * For VSI_LUT, LUT size should be set to 64 bytes + /* VF VSI will get a small RSS table. + * For VSI_LUT, LUT size should be set to 64 bytes. */ - vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; - vsi->rss_size = min_t(int, num_online_cpus(), - BIT(cap->rss_table_entry_width)); - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + vsi->rss_table_size = ICE_LUT_VSI_SIZE; + vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; + vsi->rss_lut_type = ICE_LUT_VSI; + break; + case ICE_VSI_LB: break; default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", - vsi->type); + dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n", + ice_vsi_type_str(vsi->type)); break; } } /** * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI + * @hw: HW structure used to determine the VLAN mode of the device * @ctxt: the VSI context being set * * This initializes a default VSI context for all sections except the Queues. 
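The hunks below replace open-coded shift-and-mask expressions with FIELD_PREP(). A hedged standalone equivalent of the arithmetic only: the kernel macro additionally type-checks and warns on truncation, and this sketch relies on the GCC/Clang __builtin_ctz(). The mask value mirrors the "bits 3 and 4" inner VLAN Tx mode field mentioned in the comment below; treat it as illustrative:

/* Sketch of FIELD_PREP-style packing: place a value at a mask's low bit. */
#include <stdio.h>

#define FIELD_PREP_SKETCH(mask, val) \
	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

#define VLAN_TX_MODE_M   0x18u /* bits 3..4, per the inner_vlan_flags hunk */
#define VLAN_TX_MODE_ALL 0x3u  /* allow all tagged/untagged packets */

int main(void)
{
	printf("flags=%#x\n",
	       FIELD_PREP_SKETCH(VLAN_TX_MODE_M, VLAN_TX_MODE_ALL));
	return 0;
}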
*/ -static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) +static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) { u32 table = 0; @@ -733,13 +951,28 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; /* Traffic from VSI can be sent to LAN */ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; - /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy - * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all - * packets untagged/tagged. + /* allow all untagged/tagged packets by default on Tx */ + ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M, + ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL); + /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which + * results in legacy behavior (show VLAN, DEI, and UP) in descriptor. + * + * DVM - leave inner VLAN in packet by default */ - ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & - ICE_AQ_VSI_VLAN_MODE_M) >> - ICE_AQ_VSI_VLAN_MODE_S); + if (ice_is_dvm_ena(hw)) { + ctxt->info.inner_vlan_flags |= + FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, + ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING); + ctxt->info.outer_vlan_flags = + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL); + ctxt->info.outer_vlan_flags |= + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, + ICE_AQ_VSI_OUTER_TAG_VLAN_8100); + ctxt->info.outer_vlan_flags |= + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M, + ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING); + } /* Have 1:1 UP mapping for both ingress/egress tables */ table |= ICE_UP_TABLE_TRANSLATE(0, 0); table |= ICE_UP_TABLE_TRANSLATE(1, 1); @@ -761,36 +994,30 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) * @vsi: the VSI being configured * @ctxt: VSI context structure */ -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) +static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - u16 offset = 0, qmap = 0, tx_count = 0; + u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0; + u16 num_txq_per_tc, num_rxq_per_tc; u16 qcount_tx = vsi->alloc_txq; u16 qcount_rx = vsi->alloc_rxq; - u16 tx_numq_tc, rx_numq_tc; - u16 pow = 0, max_rss = 0; - bool ena_tc0 = false; u8 netdev_tc = 0; int i; - /* at least TC0 should be enabled by default */ - if (vsi->tc_cfg.numtc) { - if (!(vsi->tc_cfg.ena_tc & BIT(0))) - ena_tc0 = true; - } else { - ena_tc0 = true; + if (!vsi->tc_cfg.numtc) { + /* at least TC0 should be enabled by default */ + vsi->tc_cfg.numtc = 1; + vsi->tc_cfg.ena_tc = 1; } - if (ena_tc0) { - vsi->tc_cfg.numtc++; - vsi->tc_cfg.ena_tc |= 1; - } + num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); + if (!num_rxq_per_tc) + num_rxq_per_tc = 1; + num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; + if (!num_txq_per_tc) + num_txq_per_tc = 1; - rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc; - if (!rx_numq_tc) - rx_numq_tc = 1; - tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc; - if (!tx_numq_tc) - tx_numq_tc = 1; + /* find the (rounded up) power-of-2 of qcount */ + pow = (u16)order_base_2(num_rxq_per_tc); /* TC mapping is a function of the number of Rx queues assigned to the * VSI for each traffic class and the offset of these queues. 
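The queue-map math in ice_vsi_setup_q_map() rounds the per-TC Rx count up to a power of two (order_base_2()) and packs it next to the TC's queue offset. A standalone sketch of that math; the bit positions below are illustrative stand-ins for the ICE_AQ_VSI_TC_Q_OFFSET_M/ICE_AQ_VSI_TC_Q_NUM_M layout the hunk uses:

/* Sketch of per-TC qmap packing with a power-of-2 queue count. */
#include <stdio.h>

static unsigned int order_base_2_sketch(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order; /* ceil(log2(n)), 0 for n <= 1 */
}

int main(void)
{
	unsigned int numtc = 3, alloc_rxq = 16;
	unsigned int per_tc = alloc_rxq / numtc;        /* 5 queues per TC */
	unsigned int pow = order_base_2_sketch(per_tc); /* 2^3 = 8 slots */
	unsigned int offset = 0, tc, qmap;

	for (tc = 0; tc < numtc; tc++) {
		qmap = (offset & 0x7ff) | (pow << 11); /* illustrative layout */
		printf("tc%u qmap=%#x\n", tc, qmap);
		offset += per_tc;
	}
	return 0;
}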
@@ -803,25 +1030,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) * * Setup number and offset of Rx queues for all TCs for the VSI */ - - qcount_rx = rx_numq_tc; - - /* qcount will change if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { - if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { - if (vsi->type == ICE_VSI_PF) - max_rss = ICE_MAX_LG_RSS_QS; - else - max_rss = ICE_MAX_SMALL_RSS_QS; - qcount_rx = min_t(int, rx_numq_tc, max_rss); - qcount_rx = min_t(int, qcount_rx, vsi->rss_size); - } - } - - /* find the (rounded up) power-of-2 of qcount */ - pow = order_base_2(qcount_rx); - - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + ice_for_each_traffic_class(i) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { /* TC is not enabled */ vsi->tc_cfg.tc_info[i].qoffset = 0; @@ -834,23 +1043,45 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) /* TC is enabled */ vsi->tc_cfg.tc_info[i].qoffset = offset; - vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; - vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc; + vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; + vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & - ICE_AQ_VSI_TC_Q_NUM_M); - offset += qcount_rx; - tx_count += tx_numq_tc; + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); + offset += num_rxq_per_tc; + tx_count += num_txq_per_tc; ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } - vsi->num_rxq = offset; + + /* if offset is non-zero, means it is calculated correctly based on + * enabled TCs for a given VSI otherwise qcount_rx will always + * be correct and non-zero because it is based off - VSI's + * allocated Rx queues which is at least 1 (hence qcount_tx will be + * at least 1) + */ + if (offset) + rx_count = offset; + else + rx_count = num_rxq_per_tc; + + if (rx_count > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + rx_count, vsi->alloc_rxq); + return -EINVAL; + } + + if (tx_count > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + tx_count, vsi->alloc_txq); + return -EINVAL; + } + vsi->num_txq = tx_count; + vsi->num_rxq = rx_count; if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { - dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); + dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); /* since there is a chance that num_rxq could have been changed * in the above for loop, make num_txq equal to num_rxq. 
*/ @@ -865,293 +1096,248 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) */ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); + + return 0; } /** - * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI + * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI * @ctxt: the VSI context being set * @vsi: the VSI being configured */ -static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) +static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) { - u8 lut_type, hash_type; + u8 dflt_q_group, dflt_q_prio; + u16 dflt_q, report_q, val; - switch (vsi->type) { - case ICE_VSI_PF: - /* PF VSI will inherit RSS instance of PF */ - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; - break; - case ICE_VSI_VF: - /* VF VSI will gets a small RSS table which is a VSI LUT type */ - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && + vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) return; - } - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; + ctxt->info.valid_sections |= cpu_to_le16(val); + dflt_q = 0; + dflt_q_group = 0; + report_q = 0; + dflt_q_prio = 0; + + /* enable flow director filtering/programming */ + val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; + ctxt->info.fd_options = cpu_to_le16(val); + /* max of allocated flow director filters */ + ctxt->info.max_fd_fltr_dedicated = + cpu_to_le16(vsi->num_gfltr); + /* max of shared flow director filters any VSI may program */ + ctxt->info.max_fd_fltr_shared = + cpu_to_le16(vsi->num_bfltr); + /* default queue index within the VSI of the default FD */ + val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q); + /* target queue or queue group to the FD filter */ + val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group); + ctxt->info.fd_def_q = cpu_to_le16(val); + /* queue index on which FD filter completion is reported */ + val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q); + /* priority of the default qindex action */ + val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio); + ctxt->info.fd_report_opt = cpu_to_le16(val); } /** - * ice_vsi_init - Create and initialize a VSI + * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI + * @ctxt: the VSI context being set * @vsi: the VSI being configured - * - * This initializes a VSI context depending on the VSI type to be added and - * passes it down to the add_vsi aq command to create a new VSI. 
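ice_vsi_init() now serves both paths: ICE_VSI_FLAG_INIT creates a fresh context via the add-VSI command, while an update must flag exactly which sections it touched in valid_sections so the firmware only consumes those. A minimal model of that gating; the flag values are placeholders, not the real ICE_AQ_VSI_PROP_* bits:

/* Sketch of the add-vs-update valid_sections gating in ice_vsi_init(). */
#include <stdio.h>

#define PROP_Q_OPT_VALID   0x1u /* placeholder section flags */
#define PROP_RXQ_MAP_VALID 0x2u
#define FLAG_INIT          0x1u

struct vsi_ctx { unsigned short valid_sections; };

static void cfg_ctx(struct vsi_ctx *ctx, unsigned int vsi_flags)
{
	if (!(vsi_flags & FLAG_INIT)) /* update path: mark touched sections */
		ctx->valid_sections |= PROP_Q_OPT_VALID | PROP_RXQ_MAP_VALID;
	/* the FLAG_INIT path issues the add-VSI op instead of an update */
}

int main(void)
{
	struct vsi_ctx ctx = { 0 };

	cfg_ctx(&ctx, 0);
	printf("valid_sections=%#x\n", ctx.valid_sections);
	return 0;
}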
*/ -static int ice_vsi_init(struct ice_vsi *vsi) +static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) { - struct ice_vsi_ctx ctxt = { 0 }; - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int ret = 0; + u8 lut_type, hash_type; + struct device *dev; + struct ice_pf *pf; + + pf = vsi->back; + dev = ice_pf_to_dev(pf); switch (vsi->type) { + case ICE_VSI_CHNL: case ICE_VSI_PF: - ctxt.flags = ICE_AQ_VSI_TYPE_PF; + /* PF VSI will inherit RSS instance of PF */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; break; case ICE_VSI_VF: - ctxt.flags = ICE_AQ_VSI_TYPE_VF; - /* VF number here is the absolute VF number (0-255) */ - ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + case ICE_VSI_SF: + /* VF VSI will gets a small RSS table which is a VSI LUT type */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; break; default: - return -ENODEV; - } - - ice_set_dflt_vsi_ctx(&ctxt); - /* if the switch is in VEB mode, allow VSI loopback */ - if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; - - /* Set LUT type and HASH type if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_set_rss_vsi_ctx(&ctxt, vsi); - - ctxt.info.sw_id = vsi->port_info->sw_id; - ice_vsi_setup_q_map(vsi, &ctxt); - - ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); - if (ret) { - dev_err(&pf->pdev->dev, - "Add VSI failed, err %d\n", ret); - return -EIO; + dev_dbg(dev, "Unsupported VSI type %s\n", + ice_vsi_type_str(vsi->type)); + return; } - /* keep context for update VSI operations */ - vsi->info = ctxt.info; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + vsi->rss_hfunc = hash_type; - /* record VSI number returned */ - vsi->vsi_num = ctxt.vsi_num; - - return ret; + ctxt->info.q_opt_rss = + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); } -/** - * ice_free_q_vector - Free memory allocated for a specific interrupt vector - * @vsi: VSI having the memory freed - * @v_idx: index of the vector to be freed - */ -static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) +static void +ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - struct ice_q_vector *q_vector; - struct ice_ring *ring; + u16 qcount, qmap; + u8 offset = 0; + int pow; - if (!vsi->q_vectors[v_idx]) { - dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n", - v_idx); - return; - } - q_vector = vsi->q_vectors[v_idx]; + qcount = vsi->num_rxq; - ice_for_each_ring(ring, q_vector->tx) - ring->q_vector = NULL; - ice_for_each_ring(ring, q_vector->rx) - ring->q_vector = NULL; + pow = order_base_2(qcount); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); - /* only VSI with an associated netdev is set up with NAPI */ - if (vsi->netdev) - netif_napi_del(&q_vector->napi); - - devm_kfree(&vsi->back->pdev->dev, q_vector); - vsi->q_vectors[v_idx] = NULL; + ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); + ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); + ctxt->info.q_mapping[1] = cpu_to_le16(qcount); } /** - * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors - * @vsi: the VSI having memory freed + * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not + * @vsi: VSI to check whether or not VLAN pruning is enabled. + * + * returns true if Rx VLAN pruning is enabled and false otherwise. 
*/ -void ice_vsi_free_q_vectors(struct ice_vsi *vsi) +static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) { - int v_idx; - - for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) - ice_free_q_vector(vsi, v_idx); + return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; } /** - * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured - * @v_idx: index of the vector in the VSI struct + * @vsi_flags: VSI configuration flags * - * We allocate one q_vector. If allocation fails we return -ENOMEM. + * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to + * reconfigure an existing context. + * + * This initializes a VSI context depending on the VSI type to be added and + * passes it down to the add_vsi aq command to create a new VSI. */ -static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) +static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) { struct ice_pf *pf = vsi->back; - struct ice_q_vector *q_vector; + struct ice_hw *hw = &pf->hw; + struct ice_vsi_ctx *ctxt; + struct device *dev; + int ret = 0; - /* allocate q_vector */ - q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) + dev = ice_pf_to_dev(pf); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) return -ENOMEM; - q_vector->vsi = vsi; - q_vector->v_idx = v_idx; - if (vsi->type == ICE_VSI_VF) + switch (vsi->type) { + case ICE_VSI_CTRL: + case ICE_VSI_LB: + case ICE_VSI_PF: + ctxt->flags = ICE_AQ_VSI_TYPE_PF; + break; + case ICE_VSI_SF: + case ICE_VSI_CHNL: + ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; + break; + case ICE_VSI_VF: + ctxt->flags = ICE_AQ_VSI_TYPE_VF; + /* VF number here is the absolute VF number (0-255) */ + ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; + break; + default: + ret = -ENODEV; goto out; - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + } - /* This will not be called in the driver load path because the netdev - * will not be created yet. All other cases with register the NAPI - * handler here (i.e. resume, reset/rebuild, etc.) + /* Handle VLAN pruning for channel VSI if main VSI has VLAN + * prune enabled */ - if (vsi->netdev) - netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, - NAPI_POLL_WEIGHT); - -out: - /* tie q_vector and VSI together */ - vsi->q_vectors[v_idx] = q_vector; + if (vsi->type == ICE_VSI_CHNL) { + struct ice_vsi *main_vsi; - return 0; -} + main_vsi = ice_get_main_vsi(pf); + if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi)) + ctxt->info.sw_flags2 |= + ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + else + ctxt->info.sw_flags2 &= + ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } -/** - * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors - * @vsi: the VSI being configured - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. 
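The removed ice_vsi_alloc_q_vectors() used the classic partial-failure rollback: on the first failed allocation, walk the index back down and free everything that did succeed. A standalone sketch of that pattern:

/* Sketch of allocate-N-with-rollback (while (v_idx--) unwind). */
#include <stdlib.h>

#define NUM_VECTORS 4

static void *vectors[NUM_VECTORS];

static int alloc_q_vectors(void)
{
	int v_idx;

	for (v_idx = 0; v_idx < NUM_VECTORS; v_idx++) {
		vectors[v_idx] = calloc(1, 64);
		if (!vectors[v_idx])
			goto err_out;
	}
	return 0;

err_out:
	while (v_idx--) {          /* unwind only what was allocated */
		free(vectors[v_idx]);
		vectors[v_idx] = NULL;
	}
	return -1;
}

int main(void) { return alloc_q_vectors(); }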
- */ -static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int v_idx = 0, num_q_vectors; - int err; + ice_set_dflt_vsi_ctx(hw, ctxt); + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) + ice_set_fd_vsi_ctx(ctxt, vsi); + /* if the switch is in VEB mode, allow VSI loopback */ + if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) + ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; - if (vsi->q_vectors[0]) { - dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", - vsi->vsi_num); - return -EEXIST; + /* Set LUT type and HASH type if RSS is enabled */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) && + vsi->type != ICE_VSI_CTRL) { + ice_set_rss_vsi_ctx(ctxt, vsi); + /* if updating VSI context, make sure to set valid_section: + * to indicate which section of VSI context being updated + */ + if (!(vsi_flags & ICE_VSI_FLAG_INIT)) + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); } - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - num_q_vectors = vsi->num_q_vectors; + ctxt->info.sw_id = vsi->port_info->sw_id; + if (vsi->type == ICE_VSI_CHNL) { + ice_chnl_vsi_setup_q_map(vsi, ctxt); } else { - err = -EINVAL; - goto err_out; - } + ret = ice_vsi_setup_q_map(vsi, ctxt); + if (ret) + goto out; - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = ice_vsi_alloc_q_vector(vsi, v_idx); - if (err) - goto err_out; + if (!(vsi_flags & ICE_VSI_FLAG_INIT)) + /* means VSI being updated */ + /* must to indicate which section of VSI context are + * being modified + */ + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); } - return 0; - -err_out: - while (v_idx--) - ice_free_q_vector(vsi, v_idx); - - dev_err(&pf->pdev->dev, - "Failed to allocate %d q_vector for VSI %d, ret=%d\n", - vsi->num_q_vectors, vsi->vsi_num, err); - vsi->num_q_vectors = 0; - return err; -} - -/** - * ice_vsi_setup_vector_base - Set up the base vector for the given VSI - * @vsi: ptr to the VSI - * - * This should only be called after ice_vsi_alloc() which allocates the - * corresponding SW VSI structure and initializes num_queue_pairs for the - * newly allocated VSI. 
- * - * Returns 0 on success or negative on failure - */ -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int num_q_vectors = 0; - - if (vsi->sw_base_vector || vsi->hw_base_vector) { - dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n", - vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector); - return -EEXIST; + /* Allow control frames out of main VSI */ + if (vsi->type == ICE_VSI_PF) { + ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); } - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return -ENOENT; - - switch (vsi->type) { - case ICE_VSI_PF: - num_q_vectors = vsi->num_q_vectors; - /* reserve slots from OS requested IRQs */ - vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker, - num_q_vectors, vsi->idx); - if (vsi->sw_base_vector < 0) { - dev_err(&pf->pdev->dev, - "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n", - num_q_vectors, vsi->vsi_num, - vsi->sw_base_vector); - return -ENOENT; + if (vsi_flags & ICE_VSI_FLAG_INIT) { + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(dev, "Add VSI failed, err %d\n", ret); + ret = -EIO; + goto out; } - pf->num_avail_sw_msix -= num_q_vectors; - - /* reserve slots from HW interrupts */ - vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, - num_q_vectors, vsi->idx); - break; - case ICE_VSI_VF: - /* take VF misc vector and data vectors into account */ - num_q_vectors = pf->num_vf_msix; - /* For VF VSI, reserve slots only from HW interrupts */ - vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, - num_q_vectors, vsi->idx); - break; - default: - dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", - vsi->type); - break; - } - - if (vsi->hw_base_vector < 0) { - dev_err(&pf->pdev->dev, - "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n", - num_q_vectors, vsi->vsi_num, vsi->hw_base_vector); - if (vsi->type != ICE_VSI_VF) { - ice_free_res(vsi->back->sw_irq_tracker, - vsi->sw_base_vector, vsi->idx); - pf->num_avail_sw_msix += num_q_vectors; + } else { + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(dev, "Update VSI failed, err %d\n", ret); + ret = -EIO; + goto out; } - return -ENOENT; } - pf->num_avail_hw_msix -= num_q_vectors; + /* keep context for update VSI operations */ + vsi->info = ctxt->info; - return 0; + /* record VSI number returned */ + vsi->vsi_num = ctxt->vsi_num; + +out: + kfree(ctxt); + return ret; } /** @@ -1162,19 +1348,31 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) { int i; + /* Avoid stale references by clearing map from vector to ring */ + if (vsi->q_vectors) { + ice_for_each_q_vector(vsi, i) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + if (q_vector) { + q_vector->tx.tx_ring = NULL; + q_vector->rx.rx_ring = NULL; + } + } + } + if (vsi->tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_alloc_txq(vsi, i) { if (vsi->tx_rings[i]) { kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; + WRITE_ONCE(vsi->tx_rings[i], NULL); } } } if (vsi->rx_rings) { - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_alloc_rxq(vsi, i) { if (vsi->rx_rings[i]) { kfree_rcu(vsi->rx_rings[i], rcu); - vsi->rx_rings[i] = NULL; + WRITE_ONCE(vsi->rx_rings[i], NULL); } } } @@ -1186,12 +1384,15 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) */ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) { + bool dvm_ena = 
ice_is_dvm_ena(&vsi->back->hw); struct ice_pf *pf = vsi->back; - int i; + struct device *dev; + u16 i; + dev = ice_pf_to_dev(pf); /* Allocate Tx rings */ - for (i = 0; i < vsi->alloc_txq; i++) { - struct ice_ring *ring; + ice_for_each_alloc_txq(vsi, i) { + struct ice_tx_ring *ring; /* allocate with kzalloc(), free with kfree_rcu() */ ring = kzalloc(sizeof(*ring), GFP_KERNEL); @@ -1201,16 +1402,21 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->q_index = i; ring->reg_idx = vsi->txq_map[i]; - ring->ring_active = false; ring->vsi = vsi; - ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; - vsi->tx_rings[i] = ring; + ring->tx_tstamps = &pf->ptp.port.tx; + ring->dev = dev; + ring->count = vsi->num_tx_desc; + ring->txq_teid = ICE_INVAL_TEID; + if (dvm_ena) + ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2; + else + ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1; + WRITE_ONCE(vsi->tx_rings[i], ring); } /* Allocate Rx rings */ - for (i = 0; i < vsi->alloc_rxq; i++) { - struct ice_ring *ring; + ice_for_each_alloc_rxq(vsi, i) { + struct ice_rx_ring *ring; /* allocate with kzalloc(), free with kfree_rcu() */ ring = kzalloc(sizeof(*ring), GFP_KERNEL); @@ -1219,12 +1425,15 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->q_index = i; ring->reg_idx = vsi->rxq_map[i]; - ring->ring_active = false; ring->vsi = vsi; ring->netdev = vsi->netdev; - ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; - vsi->rx_rings[i] = ring; + ring->count = vsi->num_rx_desc; + ring->cached_phctime = pf->ptp.cached_phc_time; + + if (ice_is_feature_supported(pf, ICE_F_GCS)) + ring->flags |= ICE_RX_FLAGS_RING_GCS; + + WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; @@ -1235,62 +1444,6 @@ err_out: } /** - * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors - * @vsi: the VSI being configured - * - * This function maps descriptor rings to the queue-specific vectors allotted - * through the MSI-X enabling code. On a constrained vector budget, we map Tx - * and Rx rings to the vector as "efficiently" as possible. 
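The removed ice_vsi_map_rings_to_vectors() spread rings over vectors by giving each vector DIV_ROUND_UP(remaining, vectors_left) rings, so an uneven count divides as evenly as possible. A runnable sketch of just that arithmetic:

/* Sketch: 10 rings over 4 vectors -> 3/3/2/2. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, rings_rem = 10, v_id;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		int per_v = DIV_ROUND_UP(rings_rem, q_vectors - v_id);

		printf("vector %d gets %d rings\n", v_id, per_v);
		rings_rem -= per_v;
	}
	return 0;
}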
- */ -static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) -{ - int q_vectors = vsi->num_q_vectors; - int tx_rings_rem, rx_rings_rem; - int v_id; - - /* initially assigning remaining rings count to VSIs num queue value */ - tx_rings_rem = vsi->num_txq; - rx_rings_rem = vsi->num_rxq; - - for (v_id = 0; v_id < q_vectors; v_id++) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; - int tx_rings_per_v, rx_rings_per_v, q_id, q_base; - - /* Tx rings mapping to vector */ - tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); - q_vector->num_ring_tx = tx_rings_per_v; - q_vector->tx.ring = NULL; - q_vector->tx.itr_idx = ICE_TX_ITR; - q_base = vsi->num_txq - tx_rings_rem; - - for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; - - tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - q_vector->tx.ring = tx_ring; - } - tx_rings_rem -= tx_rings_per_v; - - /* Rx rings mapping to vector */ - rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); - q_vector->num_ring_rx = rx_rings_per_v; - q_vector->rx.ring = NULL; - q_vector->rx.itr_idx = ICE_RX_ITR; - q_base = vsi->num_rxq - rx_rings_rem; - - for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; - - rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - q_vector->rx.ring = rx_ring; - } - rx_rings_rem -= rx_rings_per_v; - } -} - -/** * ice_vsi_manage_rss_lut - disable/enable RSS * @vsi: the VSI being changed * @ena: boolean value indicating if this is an enable or disable request @@ -1299,15 +1452,13 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) * LUT, while in the event of enable request for RSS, it will reconfigure RSS * LUT. 
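When no user-supplied LUT exists, the default table is a round-robin spread of lookup-table entries over the active queues. A standalone sketch of that fill, assuming the same modulo scheme the driver's ice_fill_rss_lut() helper uses:

#include <stdio.h>

static void fill_rss_lut(unsigned char *lut, int lut_size, int rss_size)
{
	int i;

	/* hash bucket i steers traffic to queue (i % rss_size) */
	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;
}

int main(void)
{
	unsigned char lut[16];
	int i;

	fill_rss_lut(lut, sizeof(lut), 4);	/* 16-entry LUT over 4 queues */
	for (i = 0; i < 16; i++)
		printf("%d ", lut[i]);
	printf("\n");				/* 0 1 2 3 0 1 2 3 ... */
	return 0;
}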
*/ -int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) +void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) { - int err = 0; u8 *lut; - lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size, - GFP_KERNEL); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) - return -ENOMEM; + return; if (ena) { if (vsi->rss_lut_user) @@ -1317,27 +1468,59 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) vsi->rss_size); } - err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size); - devm_kfree(&vsi->back->pdev->dev, lut); - return err; + ice_set_rss_lut(vsi, lut, vsi->rss_table_size); + kfree(lut); +} + +/** + * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI + * @vsi: VSI to be configured + * @disable: set to true to have FCS / CRC in the frame data + */ +void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable) +{ + int i; + + ice_for_each_rxq(vsi, i) + if (disable) + vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; + else + vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; } /** * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI * @vsi: VSI to be configured */ -static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) +int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) { - u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; - struct ice_aqc_get_set_rss_keys *key; struct ice_pf *pf = vsi->back; - enum ice_status status; - int err = 0; - u8 *lut; + struct device *dev; + u8 *lut, *key; + int err; + + dev = ice_pf_to_dev(pf); + if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && + (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) { + vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); + } else { + vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); - vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); + /* If orig_rss_size is valid and it is less than determined + * main VSI's rss_size, update main VSI's rss_size to be + * orig_rss_size so that when tc-qdisc is deleted, main VSI + * RSS table gets programmed to be correct (whatever it was + * to begin with (prior to setup-tc for ADQ config) + */ + if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && + vsi->orig_rss_size <= vsi->num_rxq) { + vsi->rss_size = vsi->orig_rss_size; + /* now orig_rss_size is used, reset it to zero */ + vsi->orig_rss_size = 0; + } + } - lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; @@ -1346,76 +1529,194 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) else ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); - status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut, - vsi->rss_table_size); - - if (status) { - dev_err(&vsi->back->pdev->dev, - "set_rss_lut failed, error %d\n", status); - err = -EIO; + err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); + if (err) { + dev_err(dev, "set_rss_lut failed, error %d\n", err); goto ice_vsi_cfg_rss_exit; } - key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL); + key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL); if (!key) { err = -ENOMEM; goto ice_vsi_cfg_rss_exit; } if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); + memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); else - netdev_rss_key_fill((void *)seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - memcpy(&key->standard_rss_key, seed, - ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); - - status = 
ice_aq_set_rss_key(&pf->hw, vsi->idx, key); + netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); - if (status) { - dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n", - status); - err = -EIO; - } + err = ice_set_rss_key(vsi, key); + if (err) + dev_err(dev, "set_rss_key failed, error %d\n", err); - devm_kfree(&pf->pdev->dev, key); + kfree(key); ice_vsi_cfg_rss_exit: - devm_kfree(&pf->pdev->dev, lut); + kfree(lut); return err; } /** - * ice_add_mac_to_list - Add a mac address filter entry to the list - * @vsi: the VSI to be forwarded to - * @add_list: pointer to the list which contains MAC filter entries - * @macaddr: the MAC address to be added. + * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows + * @vsi: VSI to be configured * - * Adds mac address filter entry to the temp list + * This function will only be called during the VF VSI setup. Upon successful + * completion of package download, this function will configure default RSS + * input sets for VF VSI. + */ +static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + int status; + + dev = ice_pf_to_dev(pf); + if (ice_is_safe_mode(pf)) { + dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n", + vsi->vsi_num); + return; + } + + status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG); + if (status) + dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); +} + +static const struct ice_rss_hash_cfg default_rss_cfgs[] = { + /* configure RSS for IPv4 with input set IP src/dst */ + {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for IPv6 with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ + {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for sctp4 with input set IP src/dst - only support + * RSS on SCTPv4 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpc4 with input set IPv4 src/dst */ + {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpc4t with input set IPv4 src/dst */ + {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu4 with input set IPv4 src/dst */ + {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu4e with input set IPv4 src/dst */ + {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu4u with input set IPv4 src/dst */ + { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu4d with input set IPv4 src/dst */ + {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4, + ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false}, + + /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ + 
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for sctp6 with input set IPv6 src/dst - only support + * RSS on SCTPv6 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */ + {ICE_FLOW_SEG_HDR_ESP, + ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpc6 with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpc6t with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu6 with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu6e with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu6u with input set IPv6 src/dst */ + { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for gtpu6d with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6, + ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false}, +}; + +/** + * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows + * @vsi: VSI to be configured * - * Returns 0 on success or ENOMEM on failure. + * This function will only be called after successful download package call + * during initialization of PF. Since the downloaded package will erase the + * RSS section, this function will configure RSS input sets for different + * flow types. The last profile added has the highest priority, therefore 2 + * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles + * (i.e. IPv4 src/dst TCP src/dst port). */ -int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr) +static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) { - struct ice_fltr_list_entry *tmp; + u16 vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + struct device *dev; + int status; + u32 i; - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); - if (!tmp) - return -ENOMEM; + dev = ice_pf_to_dev(pf); + if (ice_is_safe_mode(pf)) { + dev_dbg(dev, "Advanced RSS disabled. 
Package download failed, vsi num = %d\n", + vsi_num); + return; + } + for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) { + const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i]; - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src_id = ICE_SRC_ID_VSI; - tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.vsi_handle = vsi->idx; - ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr); + status = ice_add_rss_cfg(hw, vsi, cfg); + if (status) + dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n", + cfg->addl_hdrs, cfg->hash_flds, + cfg->hdr_type, cfg->symm); + } +} - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, add_list); +/** + * ice_pf_state_is_nominal - checks the PF for nominal state + * @pf: pointer to PF to check + * + * Check the PF's state for a collection of bits that would indicate + * the PF is in a state that would inhibit normal operation for + * driver functionality. + * + * Returns true if PF is in a nominal state, false otherwise + */ +bool ice_pf_state_is_nominal(struct ice_pf *pf) +{ + DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 }; - return 0; + if (!pf) + return false; + + bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS); + if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) + return false; + + return true; +} + +#define ICE_FW_MODE_REC_M BIT(1) +bool ice_is_recovery_mode(struct ice_hw *hw) +{ + return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M; } /** @@ -1426,45 +1727,41 @@ void ice_update_eth_stats(struct ice_vsi *vsi) { struct ice_eth_stats *prev_es, *cur_es; struct ice_hw *hw = &vsi->back->hw; + struct ice_pf *pf = vsi->back; u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ prev_es = &vsi->eth_stats_prev; cur_es = &vsi->eth_stats; - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_bytes, - &cur_es->rx_bytes); + if (ice_is_reset_in_progress(pf->state)) + vsi->stat_offsets_loaded = false; + + ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_bytes, &cur_es->rx_bytes); - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_unicast, - &cur_es->rx_unicast); + ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_unicast, &cur_es->rx_unicast); - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_multicast, - &cur_es->rx_multicast); + ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_multicast, &cur_es->rx_multicast); - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, - &cur_es->rx_broadcast); + ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_broadcast, &cur_es->rx_broadcast); ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->rx_discards, &cur_es->rx_discards); - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_bytes, - &cur_es->tx_bytes); + ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_bytes, &cur_es->tx_bytes); - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_unicast, - &cur_es->tx_unicast); + ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_unicast, 
&cur_es->tx_unicast); - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_multicast, - &cur_es->tx_multicast); + ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_multicast, &cur_es->tx_multicast); - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, - &cur_es->tx_broadcast); + ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_broadcast, &cur_es->tx_broadcast); ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->tx_errors, &cur_es->tx_errors); @@ -1473,300 +1770,156 @@ void ice_update_eth_stats(struct ice_vsi *vsi) } /** - * ice_free_fltr_list - free filter lists helper - * @dev: pointer to the device struct - * @h: pointer to the list head to be freed - * - * Helper function to free filter lists previously created using - * ice_add_mac_to_list + * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register + * @hw: HW pointer + * @pf_q: index of the Rx queue in the PF's queue space + * @rxdid: flexible descriptor RXDID + * @prio: priority for the RXDID for this queue + * @ena_ts: true to enable timestamp and false to disable timestamp */ -void ice_free_fltr_list(struct device *dev, struct list_head *h) +void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, + bool ena_ts) { - struct ice_fltr_list_entry *e, *tmp; + int regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); - list_for_each_entry_safe(e, tmp, h, list_entry) { - list_del(&e->list_entry); - devm_kfree(dev, e); - } -} + /* clear any previous values */ + regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M | + QRXFLXP_CNTXT_RXDID_PRIO_M | + QRXFLXP_CNTXT_TS_M); -/** - * ice_vsi_add_vlan - Add VSI membership for given VLAN - * @vsi: the VSI being configured - * @vid: VLAN id to be added - */ -int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) -{ - struct ice_fltr_list_entry *tmp; - struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - enum ice_status status; - int err = 0; - - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; + regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid); + regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio); - tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.flag = ICE_FLTR_TX; - tmp->fltr_info.src_id = ICE_SRC_ID_VSI; - tmp->fltr_info.vsi_handle = vsi->idx; - tmp->fltr_info.l_data.vlan.vlan_id = vid; + if (ena_ts) + /* Enable TimeSync on this queue */ + regval |= QRXFLXP_CNTXT_TS_M; - INIT_LIST_HEAD(&tmp->list_entry); - list_add(&tmp->list_entry, &tmp_add_list); - - status = ice_add_vlan(&pf->hw, &tmp_add_list); - if (status) { - err = -ENODEV; - dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", - vid, vsi->vsi_num); - } - - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); - return err; + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); } /** - * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN - * @vsi: the VSI being configured - * @vid: VLAN id to be removed + * ice_intrl_usec_to_reg - convert interrupt rate limit to register value + * @intrl: interrupt rate limit in usecs + * @gran: interrupt rate limit granularity in usecs * - * Returns 0 on success and negative on failure + * This function converts a decimal interrupt rate limit in usecs to the format + * expected by firmware. 
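As a concrete instance of the conversion: a 10 usec limit with a 4 usec granularity yields a field value of 2 with the enable bit set. A standalone sketch of the arithmetic, where the enable-bit position is illustrative rather than taken from the register spec:

#include <stdio.h>

#define GLINT_RATE_INTRL_ENA_M	(1u << 6)	/* illustrative bit */

static unsigned int intrl_usec_to_reg(unsigned char intrl, unsigned char gran)
{
	unsigned int val = intrl / gran;	/* granularity units */

	/* a zero value means rate limiting stays disabled */
	return val ? (val | GLINT_RATE_INTRL_ENA_M) : 0;
}

int main(void)
{
	printf("0x%x\n", intrl_usec_to_reg(10, 4));	/* 0x42 */
	return 0;
}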
*/ -int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) +static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) { - struct ice_fltr_list_entry *list; - struct ice_pf *pf = vsi->back; - LIST_HEAD(tmp_add_list); - int status = 0; - - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); - if (!list) - return -ENOMEM; - - list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; - list->fltr_info.vsi_handle = vsi->idx; - list->fltr_info.fltr_act = ICE_FWD_TO_VSI; - list->fltr_info.l_data.vlan.vlan_id = vid; - list->fltr_info.flag = ICE_FLTR_TX; - list->fltr_info.src_id = ICE_SRC_ID_VSI; - - INIT_LIST_HEAD(&list->list_entry); - list_add(&list->list_entry, &tmp_add_list); - - if (ice_remove_vlan(&pf->hw, &tmp_add_list)) { - dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", - vid, vsi->vsi_num); - status = -EIO; - } + u32 val = intrl / gran; - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); - return status; + if (val) + return val | GLINT_RATE_INTRL_ENA_M; + return 0; } /** - * ice_vsi_cfg_rxqs - Configure the VSI for Rx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Rx VSI for operation. + * ice_write_intrl - write throttle rate limit to interrupt specific register + * @q_vector: pointer to interrupt specific structure + * @intrl: throttle rate limit in microseconds to write */ -int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) +void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl) { - int err = 0; - u16 i; - - if (vsi->type == ICE_VSI_VF) - goto setup_rings; - - if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) - vsi->max_frame = vsi->netdev->mtu + - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - else - vsi->max_frame = ICE_RXBUF_2048; + struct ice_hw *hw = &q_vector->vsi->back->hw; - vsi->rx_buf_len = ICE_RXBUF_2048; -setup_rings: - /* set up individual rings */ - for (i = 0; i < vsi->num_rxq && !err; i++) - err = ice_setup_rx_ctx(vsi->rx_rings[i]); - - if (err) { - dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); - return -EIO; - } - return err; + wr32(hw, GLINT_RATE(q_vector->reg_idx), + ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25)); } -/** - * ice_vsi_cfg_txqs - Configure the VSI for Tx - * @vsi: the VSI being configured - * @rings: Tx ring array to be configured - * @offset: offset within vsi->txq_map - * - * Return 0 on success and a negative value on error - * Configure the Tx VSI for operation. 
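The removed routine below walks only the contiguous set of enabled traffic classes and configures each class's Tx queues in order. A standalone sketch of that loop shape, with printouts standing in for the context write and the AQ enable call:

#include <stdio.h>

#define MAX_TC 8

struct tc_info { int qcount_tx; };

int main(void)
{
	unsigned char ena_tc = 0x3;			/* TCs 0 and 1 enabled */
	struct tc_info tc[MAX_TC] = { {4}, {2} };	/* queues per TC */
	int q_idx = 0, t, i;

	for (t = 0; t < MAX_TC; t++) {
		/* enabled TCs are contiguous, so stop at the first gap */
		if (!(ena_tc & (1 << t)))
			break;
		for (i = 0; i < tc[t].qcount_tx; i++, q_idx++)
			printf("TC %d: configure and enable Tx queue %d\n",
			       t, q_idx);
	}
	return 0;
}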
- */ -static int -ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) +static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc) { - struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_aqc_add_txqs_perq *txq; - struct ice_pf *pf = vsi->back; - u8 num_q_grps, q_idx = 0; - enum ice_status status; - u16 buf_len, i, pf_q; - int err = 0, tc; - - buf_len = sizeof(struct ice_aqc_add_tx_qgrp); - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; - - qg_buf->num_txqs = 1; - num_q_grps = 1; - - /* set up and configure the Tx queues for each enabled TC */ - for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) { - if (!(vsi->tc_cfg.ena_tc & BIT(tc))) - break; - - for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_tlan_ctx tlan_ctx = { 0 }; - - pf_q = vsi->txq_map[q_idx + offset]; - ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); - - /* init queue specific tail reg. It is referred as - * transmit comm scheduler queue doorbell. - */ - rings[q_idx]->tail = - pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, - num_q_grps, qg_buf, buf_len, - NULL); - if (status) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - err = -ENODEV; - goto err_cfg_txqs; - } - - /* Add Tx Queue TEID into the VSI Tx ring from the - * response. This will complete configuring and - * enabling the queue. - */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - rings[q_idx]->txq_teid = - le32_to_cpu(txq->q_teid); - - q_idx++; - } + switch (rc->type) { + case ICE_RX_CONTAINER: + if (rc->rx_ring) + return rc->rx_ring->q_vector; + break; + case ICE_TX_CONTAINER: + if (rc->tx_ring) + return rc->tx_ring->q_vector; + break; + default: + break; } -err_cfg_txqs: - devm_kfree(&pf->pdev->dev, qg_buf); - return err; -} -/** - * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Tx VSI for operation. - */ -int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) -{ - return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0); + return NULL; } /** - * ice_intrl_usec_to_reg - convert interrupt rate limit to register value - * @intrl: interrupt rate limit in usecs - * @gran: interrupt rate limit granularity in usecs - * - * This function converts a decimal interrupt rate limit in usecs to the format - * expected by firmware. 
+ * __ice_write_itr - write throttle rate to register + * @q_vector: pointer to interrupt data structure + * @rc: pointer to ring container + * @itr: throttle rate in microseconds to write */ -static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) +static void __ice_write_itr(struct ice_q_vector *q_vector, + struct ice_ring_container *rc, u16 itr) { - u32 val = intrl / gran; + struct ice_hw *hw = &q_vector->vsi->back->hw; - if (val) - return val | GLINT_RATE_INTRL_ENA_M; - return 0; + wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), + ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S); } /** - * ice_cfg_itr - configure the initial interrupt throttle values - * @hw: pointer to the HW structure - * @q_vector: interrupt vector that's being configured - * @vector: HW vector index to apply the interrupt throttling to - * - * Configure interrupt throttling values for the ring containers that are - * associated with the interrupt vector passed in. + * ice_write_itr - write throttle rate to queue specific register + * @rc: pointer to ring container + * @itr: throttle rate in microseconds to write */ -static void -ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) +void ice_write_itr(struct ice_ring_container *rc, u16 itr) { - if (q_vector->num_ring_rx) { - struct ice_ring_container *rc = &q_vector->rx; - - /* if this value is set then don't overwrite with default */ - if (!rc->itr_setting) - rc->itr_setting = ICE_DFLT_RX_ITR; - - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - rc->latency_range = ICE_LOW_LATENCY; - wr32(hw, GLINT_ITR(rc->itr_idx, vector), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); - } + struct ice_q_vector *q_vector; - if (q_vector->num_ring_tx) { - struct ice_ring_container *rc = &q_vector->tx; + q_vector = ice_pull_qvec_from_rc(rc); + if (!q_vector) + return; - /* if this value is set then don't overwrite with default */ - if (!rc->itr_setting) - rc->itr_setting = ICE_DFLT_TX_ITR; + __ice_write_itr(q_vector, rc, itr); +} - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - rc->latency_range = ICE_LOW_LATENCY; - wr32(hw, GLINT_ITR(rc->itr_idx, vector), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); +/** + * ice_set_q_vector_intrl - set up interrupt rate limiting + * @q_vector: the vector to be configured + * + * Interrupt rate limiting is local to the vector, not per-queue so we must + * detect if either ring container has dynamic moderation enabled to decide + * what to set the interrupt rate limit to via INTRL settings. In the case that + * dynamic moderation is disabled on both, write the value with the cached + * setting to make sure INTRL register matches the user visible value. + */ +void ice_set_q_vector_intrl(struct ice_q_vector *q_vector) +{ + if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { + /* in the case of dynamic enabled, cap each vector to no more + * than (4 us) 250,000 ints/sec, which allows low latency + * but still less than 500,000 interrupts per second, which + * reduces CPU a bit in the case of the lowest latency + * setting. The 4 here is a value in microseconds. + */ + ice_write_intrl(q_vector, 4); + } else { + ice_write_intrl(q_vector, q_vector->intrl); } } /** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured + * + * This configures MSIX mode interrupts for the PF VSI, and should not be used + * for the VF VSI. 
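Each queue's interrupt cause-control register encodes three things: a cause-enable bit, the ITR index to use, and the MSI-X vector that services the queue; the helpers called in the hunk below compose exactly such a value. A standalone sketch with illustrative shift/mask values (not taken from the register spec):

#include <stdio.h>

#define QINT_CAUSE_ENA_M	(1u << 30)	/* illustrative */
#define QINT_ITR_INDX_S		11		/* illustrative */
#define QINT_MSIX_INDX_S	0		/* illustrative */

static unsigned int qint_ctl(unsigned int msix_idx, unsigned int itr_idx)
{
	return QINT_CAUSE_ENA_M |
	       (itr_idx << QINT_ITR_INDX_S) |
	       (msix_idx << QINT_MSIX_INDX_S);
}

int main(void)
{
	/* vector 5 servicing this queue, using ITR index 1 */
	printf("QINT_TQCTL value: 0x%08x\n", qint_ctl(5, 1));
	return 0;
}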
*/ void ice_vsi_cfg_msix(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - u16 vector = vsi->hw_base_vector; struct ice_hw *hw = &pf->hw; - u32 txq = 0, rxq = 0; + u16 txq = 0, rxq = 0; int i, q; - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + ice_for_each_q_vector(vsi, i) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; + u16 reg_idx = q_vector->reg_idx; - ice_cfg_itr(hw, q_vector, vector); - - wr32(hw, GLINT_RATE(vector), - ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); + ice_cfg_itr(hw, q_vector); /* Both Transmit Queue Interrupt Cause Control register * and Receive Queue Interrupt Cause control register @@ -1780,362 +1933,462 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) * tracked for this PF. */ for (q = 0; q < q_vector->num_ring_tx; q++) { - int itr_idx = q_vector->tx.itr_idx; - u32 val; - - if (vsi->type == ICE_VSI_VF) - val = QINT_TQCTL_CAUSE_ENA_M | - (itr_idx << QINT_TQCTL_ITR_INDX_S) | - ((i + 1) << QINT_TQCTL_MSIX_INDX_S); - else - val = QINT_TQCTL_CAUSE_ENA_M | - (itr_idx << QINT_TQCTL_ITR_INDX_S) | - (vector << QINT_TQCTL_MSIX_INDX_S); - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); + ice_cfg_txq_interrupt(vsi, txq, reg_idx, + q_vector->tx.itr_idx); txq++; } for (q = 0; q < q_vector->num_ring_rx; q++) { - int itr_idx = q_vector->rx.itr_idx; - u32 val; - - if (vsi->type == ICE_VSI_VF) - val = QINT_RQCTL_CAUSE_ENA_M | - (itr_idx << QINT_RQCTL_ITR_INDX_S) | - ((i + 1) << QINT_RQCTL_MSIX_INDX_S); - else - val = QINT_RQCTL_CAUSE_ENA_M | - (itr_idx << QINT_RQCTL_ITR_INDX_S) | - (vector << QINT_RQCTL_MSIX_INDX_S); - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); + ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, + q_vector->rx.itr_idx); rxq++; } } - - ice_flush(hw); } /** - * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx - * @vsi: the VSI being changed + * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings + * @vsi: the VSI whose rings are to be enabled + * + * Returns 0 on success and a negative value on error */ -int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) +int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi) { - struct device *dev = &vsi->back->pdev->dev; - struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; - enum ice_status status; - - /* Here we are configuring the VSI to let the driver add VLAN tags by - * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag - * insertion happens in the Tx hot path, in ice_tx_map. 
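The removed VLAN routines below all follow the same update shape: stage the new field in a VSI context, mark only the touched property section valid, then push the context to firmware. A minimal model of that pattern with stand-in types (the real driver pushes the context with ice_update_vsi()):

#include <stdio.h>
#include <string.h>

#define PROP_VLAN_VALID	(1u << 0)	/* illustrative section bit */

struct vsi_ctx_info {
	unsigned char vlan_flags;
	unsigned short valid_sections;
};
struct vsi_ctx { struct vsi_ctx_info info; };

/* stand-in for the AQ call that applies the context to the VSI */
static int update_vsi(struct vsi_ctx *ctx)
{
	printf("update: vlan_flags=0x%x sections=0x%x\n",
	       ctx->info.vlan_flags, ctx->info.valid_sections);
	return 0;
}

int main(void)
{
	struct vsi_ctx ctx;

	memset(&ctx, 0, sizeof(ctx));
	ctx.info.vlan_flags = 0x3;			/* e.g. "mode all" */
	ctx.info.valid_sections |= PROP_VLAN_VALID;	/* only VLAN section */
	return update_vsi(&ctx);
}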
- */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; - - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); - if (status) { - dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", - status, hw->adminq.sq_last_status); - return -EIO; - } + return ice_vsi_ctrl_all_rx_rings(vsi, true); +} - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; +/** + * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings + * @vsi: the VSI whose rings are to be disabled + * + * Returns 0 on success and a negative value on error + */ +int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_ctrl_all_rx_rings(vsi, false); } /** - * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx - * @vsi: the VSI being changed - * @ena: boolean value indicating if this is a enable or disable request + * ice_vsi_stop_tx_rings - Disable Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative ID of VF/VM + * @rings: Tx ring array to be stopped + * @count: number of Tx ring array elements */ -int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) +static int +ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count) { - struct device *dev = &vsi->back->pdev->dev; - struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; - enum ice_status status; + u16 q_idx; - /* Here we are configuring what the VSI should do with the VLAN tag in - * the Rx packet. We can either leave the tag in the packet or put it in - * the Rx descriptor. - */ - if (ena) { - /* Strip VLAN tag from Rx packet and put it in the desc */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; - } else { - /* Disable stripping. 
Leave tag in packet */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; - } + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) + return -EINVAL; - /* Allow all packets untagged/tagged */ - ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; + for (q_idx = 0; q_idx < count; q_idx++) { + struct ice_txq_meta txq_meta = { }; + int status; - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + if (!rings || !rings[q_idx]) + return -EINVAL; - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); - if (status) { - dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", - ena, status, hw->adminq.sq_last_status); - return -EIO; + ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); + status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, + rings[q_idx], &txq_meta); + + if (status) + return status; } - vsi->info.vlan_flags = ctxt.info.vlan_flags; return 0; } /** - * ice_vsi_start_rx_rings - start VSI's Rx rings - * @vsi: the VSI whose rings are to be started - * - * Returns 0 on success and a negative value on error + * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative ID of VF/VM */ -int ice_vsi_start_rx_rings(struct ice_vsi *vsi) +int +ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num) { - return ice_vsi_ctrl_rx_rings(vsi, true); + return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); } /** - * ice_vsi_stop_rx_rings - stop VSI's Rx rings - * @vsi: the VSI - * - * Returns 0 on success and a negative value on error + * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings + * @vsi: the VSI being configured */ -int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) +int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) { - return ice_vsi_ctrl_rx_rings(vsi, false); + return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); } /** - * ice_vsi_stop_tx_rings - Disable Tx rings + * ice_vsi_is_rx_queue_active * @vsi: the VSI being configured - * @rst_src: reset source - * @rel_vmvf_num: Relative id of VF/VM - * @rings: Tx ring array to be stopped - * @offset: offset within vsi->txq_map + * + * Return true if at least one queue is active. 
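The check below reads each Rx queue's control register and tests the enable-status bit; one set bit is enough to report the VSI active. A standalone model with a mocked register array and an illustrative bit position:

#include <stdbool.h>
#include <stdio.h>

#define QRX_CTRL_QENA_STAT_M	(1u << 2)	/* illustrative */

/* mocked QRX_CTRL values; queue 2 is still enabled */
static unsigned int mock_qrx_ctrl[4] = { 0, 0, QRX_CTRL_QENA_STAT_M, 0 };

static bool any_rx_queue_active(int num_rxq)
{
	int i;

	for (i = 0; i < num_rxq; i++)
		if (mock_qrx_ctrl[i] & QRX_CTRL_QENA_STAT_M)
			return true;
	return false;
}

int main(void)
{
	printf("active: %d\n", any_rx_queue_active(4));	/* prints 1 */
	return 0;
}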
*/ -static int -ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring **rings, int offset) +bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 *q_teids, val; - u16 *q_ids, i; - int err = 0; - - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) - return -EINVAL; + int i; - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), - GFP_KERNEL); - if (!q_teids) - return -ENOMEM; + ice_for_each_rxq(vsi, i) { + u32 rx_reg; + int pf_q; - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), - GFP_KERNEL); - if (!q_ids) { - err = -ENOMEM; - goto err_alloc_q_ids; + pf_q = vsi->rxq_map[i]; + rx_reg = rd32(hw, QRX_CTRL(pf_q)); + if (rx_reg & QRX_CTRL_QENA_STAT_M) + return true; } - /* set up the Tx queue list to be disabled */ - ice_for_each_txq(vsi, i) { - u16 v_idx; + return false; +} - if (!rings || !rings[i] || !rings[i]->q_vector) { - err = -EINVAL; - goto err_out; - } +static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) +{ + if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { + vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; + vsi->tc_cfg.numtc = 1; + return; + } - q_ids[i] = vsi->txq_map[i + offset]; - q_teids[i] = rings[i]->txq_teid; + /* set VSI TC information based on DCB config */ + ice_vsi_set_dcb_tc_cfg(vsi); +} - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); +/** + * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling + * @vsi: the VSI being configured + * @tx: bool to determine Tx or Rx rule + * @create: bool to determine create or remove Rule + * + * Adding an ethtype Tx rule to the uplink VSI results in it being applied + * to the whole port, so LLDP transmission for VFs will be blocked too. + */ +void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) +{ + int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, + enum ice_sw_fwd_act_type act); + struct ice_pf *pf = vsi->back; + struct device *dev; + int status; - /* software is expected to wait for 100 ns */ - ndelay(100); + dev = ice_pf_to_dev(pf); + eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; - /* trigger a software interrupt for the vector associated to - * the queue to schedule NAPI handler - */ - v_idx = rings[i]->q_vector->v_idx; - wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), - GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); - } - status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, - rst_src, rel_vmvf_num, NULL); - /* if the disable queue command was exercised during an active reset - * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as - * the reset operation disables queues at the hardware level anyway. - */ - if (status == ICE_ERR_RESET_ONGOING) { - dev_info(&pf->pdev->dev, - "Reset in progress. 
LAN Tx queues already disabled\n"); - } else if (status) { - dev_err(&pf->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", - status); - err = -ENODEV; - } + if (tx) { + status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, + ICE_DROP_PACKET); + } else { + if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) { + status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, + ICE_FWD_TO_VSI); + if (!status || !create) + goto report; + + dev_info(dev, + "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n", + vsi->vsi_num, status); + } -err_out: - devm_kfree(&pf->pdev->dev, q_ids); + status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create); + if (!status) + set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags); -err_alloc_q_ids: - devm_kfree(&pf->pdev->dev, q_teids); + } - return err; +report: + if (status) + dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n", + create ? "add" : "remove", tx ? "Tx" : "Rx", + vsi->vsi_num, status); } /** - * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings - * @vsi: the VSI being configured - * @rst_src: reset source - * @rel_vmvf_num: Relative id of VF/VM + * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP + * @pf: the PF being configured + * @enable: enable or disable + * + * Configure switch rules to enable/disable LLDP handling by software + * across PF. */ -int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, - enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) +void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable) { - return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, - 0); + struct ice_vsi *vsi; + struct ice_vf *vf; + unsigned int bkt; + + vsi = ice_get_main_vsi(pf); + ice_vsi_cfg_sw_lldp(vsi, false, enable); + + if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) + return; + + ice_for_each_vf(pf, bkt, vf) { + vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + continue; + + if (ice_vf_is_lldp_ena(vf)) + ice_vsi_cfg_sw_lldp(vsi, false, enable); + } } /** - * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI - * @vsi: VSI to enable or disable VLAN pruning on - * @ena: set to true to enable VLAN pruning and false to disable it + * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it + * @vsi: pointer to the VSI * - * returns 0 if VSI is updated, negative otherwise + * This function will allocate new scheduler aggregator now if needed and will + * move specified VSI into it. 
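The selection loop below prefers an existing aggregator node that still has room (each node holds at most a fixed number of VSIs) and otherwise claims the next unused aggregator ID. A simplified standalone sketch of that policy, with illustrative constants:

#include <stdbool.h>
#include <stdio.h>

#define MAX_AGG_NODES		4
#define MAX_VSIS_PER_NODE	64

struct agg_node { bool valid; unsigned int agg_id; int num_vsis; };

static struct agg_node *pick_node(struct agg_node *nodes, unsigned int id_start)
{
	int i;

	for (i = 0; i < MAX_AGG_NODES; i++) {
		struct agg_node *n = &nodes[i];

		if (n->num_vsis == MAX_VSIS_PER_NODE)
			continue;		/* node is full, keep looking */
		if (n->valid)
			return n;		/* reuse an existing node */
		n->agg_id = id_start + i;	/* claim a fresh ID; caller
						 * then creates the scheduler
						 * node and marks it valid
						 */
		return n;
	}
	return NULL;
}

int main(void)
{
	struct agg_node nodes[MAX_AGG_NODES] = { 0 };
	struct agg_node *n = pick_node(nodes, 100);

	if (n)
		printf("selected agg_id %u\n", n->agg_id);
	return 0;
}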
*/ -int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) +static void ice_set_agg_vsi(struct ice_vsi *vsi) { - struct ice_vsi_ctx *ctxt; - struct device *dev; + struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_agg_node *agg_node_iter = NULL; + u32 agg_id = ICE_INVALID_AGG_NODE_ID; + struct ice_agg_node *agg_node = NULL; + int node_offset, max_agg_nodes = 0; + struct ice_port_info *port_info; + struct ice_pf *pf = vsi->back; + u32 agg_node_id_start = 0; int status; - if (!vsi) - return -EINVAL; + /* create (as needed) scheduler aggregator node and move VSI into + * corresponding aggregator node + * - PF aggregator node to contains VSIs of type _PF and _CTRL + * - VF aggregator nodes will contain VF VSI + */ + port_info = pf->hw.port_info; + if (!port_info) + return; - dev = &vsi->back->pdev->dev; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); - if (!ctxt) - return -ENOMEM; + switch (vsi->type) { + case ICE_VSI_CTRL: + case ICE_VSI_CHNL: + case ICE_VSI_LB: + case ICE_VSI_PF: + case ICE_VSI_SF: + max_agg_nodes = ICE_MAX_PF_AGG_NODES; + agg_node_id_start = ICE_PF_AGG_NODE_ID_START; + agg_node_iter = &pf->pf_agg_node[0]; + break; + case ICE_VSI_VF: + /* user can create 'n' VFs on a given PF, but since max children + * per aggregator node can be only 64. Following code handles + * aggregator(s) for VF VSIs, either selects a agg_node which + * was already created provided num_vsis < 64, otherwise + * select next available node, which will be created + */ + max_agg_nodes = ICE_MAX_VF_AGG_NODES; + agg_node_id_start = ICE_VF_AGG_NODE_ID_START; + agg_node_iter = &pf->vf_agg_node[0]; + break; + default: + /* other VSI type, handle later if needed */ + dev_dbg(dev, "unexpected VSI type %s\n", + ice_vsi_type_str(vsi->type)); + return; + } - ctxt->info = vsi->info; + /* find the appropriate aggregator node */ + for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) { + /* see if we can find space in previously created + * node if num_vsis < 64, otherwise skip + */ + if (agg_node_iter->num_vsis && + agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { + agg_node_iter++; + continue; + } - if (ena) { - ctxt->info.sec_flags |= - ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << - ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; - ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; - } else { - ctxt->info.sec_flags &= - ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << - ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); - ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + if (agg_node_iter->valid && + agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { + agg_id = agg_node_iter->agg_id; + agg_node = agg_node_iter; + break; + } + + /* find unclaimed agg_id */ + if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { + agg_id = node_offset + agg_node_id_start; + agg_node = agg_node_iter; + break; + } + /* move to next agg_node */ + agg_node_iter++; } - ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | - ICE_AQ_VSI_PROP_SW_VALID); + if (!agg_node) + return; - status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); + /* if selected aggregator node was not created, create it */ + if (!agg_node->valid) { + status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG, + (u8)vsi->tc_cfg.ena_tc); + if (status) { + dev_err(dev, "unable to create aggregator node with agg_id %u\n", + agg_id); + return; + } + /* aggregator node is created, store the needed info */ + agg_node->valid = true; + agg_node->agg_id = agg_id; + } + + /* move VSI to corresponding aggregator node */ + status = 
ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, + (u8)vsi->tc_cfg.ena_tc); if (status) { - netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n", - ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status, - vsi->back->hw.adminq.sq_last_status); - goto err_out; + dev_err(dev, "unable to move VSI idx %u into aggregator %u node", + vsi->idx, agg_id); + return; } - vsi->info.sec_flags = ctxt->info.sec_flags; - vsi->info.sw_flags2 = ctxt->info.sw_flags2; + /* keep active children count for aggregator node */ + agg_node->num_vsis++; - devm_kfree(dev, ctxt); - return 0; - -err_out: - devm_kfree(dev, ctxt); - return -EIO; + /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved + * to aggregator node + */ + vsi->agg_node = agg_node; + dev_dbg(dev, "successfully moved VSI idx %u tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n", + vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, + vsi->agg_node->num_vsis); } -/** - * ice_vsi_setup - Set up a VSI by a given type - * @pf: board private structure - * @pi: pointer to the port_info instance - * @type: VSI type - * @vf_id: defines VF id to which this VSI connects. This field is meant to be - * used only for ICE_VSI_VF VSI type. For other VSI types, should - * fill-in ICE_INVAL_VFID as input. - * - * This allocates the sw VSI structure and its queue resources. - * - * Returns pointer to the successfully allocated and configured VSI sw struct on - * success, NULL on failure. - */ -struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type type, u16 vf_id) +static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - struct device *dev = &pf->pdev->dev; - struct ice_vsi *vsi; + struct device *dev = ice_pf_to_dev(pf); int ret, i; - vsi = ice_vsi_alloc(pf, type); - if (!vsi) { - dev_err(dev, "could not allocate VSI\n"); - return NULL; + /* configure VSI nodes based on number of queues and TC's */ + ice_for_each_traffic_class(i) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) + continue; + + if (vsi->type == ICE_VSI_CHNL) { + if (!vsi->alloc_txq && vsi->num_txq) + max_txqs[i] = vsi->num_txq; + else + max_txqs[i] = pf->num_lan_tx; + } else { + max_txqs[i] = vsi->alloc_txq; + } + + if (vsi->type == ICE_VSI_PF) + max_txqs[i] += vsi->num_xdp_txq; } - vsi->port_info = pi; + dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_err(dev, "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); + return ret; + } + + return 0; +} + +/** + * ice_vsi_cfg_def - configure default VSI based on the type + * @vsi: pointer to VSI + */ +static int ice_vsi_cfg_def(struct ice_vsi *vsi) +{ + struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_pf *pf = vsi->back; + int ret; + vsi->vsw = pf->first_sw; - if (vsi->type == ICE_VSI_VF) - vsi->vf_id = vf_id; - if (ice_vsi_get_qs(vsi)) { + ret = ice_vsi_alloc_def(vsi, vsi->ch); + if (ret) + return ret; + + /* allocate memory for Tx/Rx ring stat pointers */ + ret = ice_vsi_alloc_stat_arrays(vsi); + if (ret) + goto unroll_vsi_alloc; + + ice_alloc_fd_res(vsi); + + ret = ice_vsi_get_qs(vsi); + if (ret) { dev_err(dev, "Failed to allocate queues. 
vsi->idx = %d\n", vsi->idx); - goto unroll_get_qs; + goto unroll_vsi_alloc_stat; } /* set RSS capabilities */ ice_vsi_set_rss_params(vsi); - /* set tc configuration */ + /* set TC configuration */ ice_vsi_set_tc_cfg(vsi); /* create the VSI */ - ret = ice_vsi_init(vsi); + ret = ice_vsi_init(vsi, vsi->flags); if (ret) goto unroll_get_qs; + ice_vsi_init_vlan_ops(vsi); + switch (vsi->type) { + case ICE_VSI_CTRL: + case ICE_VSI_SF: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) goto unroll_vsi_init; - ret = ice_vsi_setup_vector_base(vsi); + ret = ice_vsi_alloc_rings(vsi); if (ret) - goto unroll_alloc_q_vector; + goto unroll_vector_base; - ret = ice_vsi_alloc_rings(vsi); + ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base; + if (ice_is_xdp_ena_vsi(vsi)) { + ret = ice_vsi_determine_xdp_res(vsi); + if (ret) + goto unroll_vector_base; + ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, + ICE_XDP_CFG_PART); + if (ret) + goto unroll_vector_base; + } + ice_vsi_map_rings_to_vectors(vsi); - /* Do not exit if configuring RSS had an issue, at least - * receive traffic on first queue. Hence no need to capture - * return value - */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + vsi->stat_offsets_loaded = false; + + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ + if (vsi->type != ICE_VSI_CTRL) + /* Do not exit if configuring RSS had an issue, at + * least receive traffic on first queue. Hence no + * need to capture return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + ice_vsi_cfg_rss_lut_key(vsi); + ice_vsi_set_rss_flow_fld(vsi); + } + ice_init_arfs(vsi); + break; + case ICE_VSI_CHNL: + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { ice_vsi_cfg_rss_lut_key(vsi); + ice_vsi_set_rss_flow_fld(vsi); + } break; case ICE_VSI_VF: /* VF driver will take care of creating netdev for this type and @@ -2151,54 +2404,181 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_alloc_q_vector; - /* Setup Vector base only during VF init phase or when VF asks - * for more vectors than assigned number. In all other cases, - * assign hw_base_vector to the value given earlier. + ret = ice_vsi_alloc_ring_stats(vsi); + if (ret) + goto unroll_vector_base; + + vsi->stat_offsets_loaded = false; + + /* Do not exit if configuring RSS had an issue, at least + * receive traffic on first queue. 
Hence no need to capture + * return value */ - if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) { - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto unroll_vector_base; - } else { - vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + ice_vsi_cfg_rss_lut_key(vsi); + ice_vsi_set_vf_rss_flow_fld(vsi); } - pf->q_left_tx -= vsi->alloc_txq; - pf->q_left_rx -= vsi->alloc_rxq; + break; + case ICE_VSI_LB: + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_vsi_init; + + ret = ice_vsi_alloc_ring_stats(vsi); + if (ret) + goto unroll_vector_base; + break; default: /* clean up the resources and exit */ + ret = -EINVAL; goto unroll_vsi_init; } - /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; - - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (ret) { - dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n"); - goto unroll_vector_base; - } - - return vsi; + return 0; unroll_vector_base: /* reclaim SW interrupts back to the common pool */ - ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; - /* reclaim HW interrupt back to the common pool */ - ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); - pf->num_avail_hw_msix += vsi->num_q_vectors; unroll_alloc_q_vector: ice_vsi_free_q_vectors(vsi); unroll_vsi_init: - ice_vsi_delete(vsi); + ice_vsi_delete_from_hw(vsi); unroll_get_qs: ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; - ice_vsi_clear(vsi); +unroll_vsi_alloc_stat: + ice_vsi_free_stats(vsi); +unroll_vsi_alloc: + ice_vsi_free_arrays(vsi); + return ret; +} + +/** + * ice_vsi_cfg - configure a previously allocated VSI + * @vsi: pointer to VSI + */ +int ice_vsi_cfg(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int ret; + + if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) + return -EINVAL; + + ret = ice_vsi_cfg_def(vsi); + if (ret) + return ret; + + ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); + if (ret) + ice_vsi_decfg(vsi); + + if (vsi->type == ICE_VSI_CTRL) { + if (vsi->vf) { + WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); + vsi->vf->ctrl_vsi_idx = vsi->idx; + } else { + WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); + pf->ctrl_vsi_idx = vsi->idx; + } + } + + return ret; +} + +/** + * ice_vsi_decfg - remove all VSI configuration + * @vsi: pointer to VSI + */ +void ice_vsi_decfg(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int err; + + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); + err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); + if (err) + dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", + vsi->vsi_num, err); + + if (vsi->xdp_rings) + /* return value check can be skipped here, it always returns + * 0 if reset is in progress + */ + ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); + + ice_vsi_clear_rings(vsi); + ice_vsi_free_q_vectors(vsi); + ice_vsi_put_qs(vsi); + ice_vsi_free_arrays(vsi); + + /* SR-IOV determines needed MSIX resources all at once instead of per + * VSI since when VFs are spawned we know how many VFs there are and how + * many interrupts each VF needs. SR-IOV MSIX resources are also + * cleared in the same manner. 
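The unroll_* labels in ice_vsi_cfg_def() above are the standard kernel error-unwinding idiom: each failure jumps to the label that releases everything acquired so far, in reverse order of acquisition. A minimal standalone model of the idiom, with hypothetical step names:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("do   %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

static int setup(void)
{
	int ret;

	ret = step("alloc queues", 0);
	if (ret)
		return ret;
	ret = step("init vsi", 0);
	if (ret)
		goto unroll_queues;
	ret = step("alloc rings", 1);	/* simulate a failure here */
	if (ret)
		goto unroll_init;
	return 0;

unroll_init:
	undo("init vsi");		/* falls through to earlier undo */
unroll_queues:
	undo("alloc queues");
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}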
+ */ + + if (vsi->type == ICE_VSI_VF && + vsi->agg_node && vsi->agg_node->valid) + vsi->agg_node->num_vsis--; +} + +/** + * ice_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @params: parameters to use when creating the VSI + * + * This allocates the sw VSI structure and its queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. + */ +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *vsi; + int ret; + + /* ice_vsi_setup can only initialize a new VSI, and we must have + * a port_info structure for it. + */ + if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || + WARN_ON(!params->port_info)) + return NULL; + + vsi = ice_vsi_alloc(pf); + if (!vsi) { + dev_err(dev, "could not allocate VSI\n"); + return NULL; + } + + vsi->params = *params; + ret = ice_vsi_cfg(vsi); + if (ret) + goto err_vsi_cfg; + + /* Add switch rule to drop all Tx Flow Control Frames, of look up + * type ETHERTYPE from VSIs, and restrict malicious VF from sending + * out PAUSE or PFC frames. If enabled, FW can still send FC frames. + * The rule is added once for PF VSI in order to create appropriate + * recipe, since VSI/VSI list is ignored with drop action... + * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to + * be dropped so that VFs cannot send LLDP packets to reconfig DCB + * settings in the HW. + */ + if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { + ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, + ICE_DROP_PACKET); + ice_vsi_cfg_sw_lldp(vsi, true, true); + } + + if (!vsi->agg_node) + ice_set_agg_vsi(vsi); + + return vsi; + +err_vsi_cfg: + ice_vsi_free(vsi); return NULL; } @@ -2210,23 +2590,28 @@ unroll_get_qs: static void ice_vsi_release_msix(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - u16 vector = vsi->hw_base_vector; struct ice_hw *hw = &pf->hw; u32 txq = 0; u32 rxq = 0; int i, q; - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + ice_for_each_q_vector(vsi, i) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; - wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); - wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); + ice_write_intrl(q_vector, 0); for (q = 0; q < q_vector->num_ring_tx; q++) { + ice_write_itr(&q_vector->tx, 0); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); + if (vsi->xdp_rings) { + u32 xdp_txq = txq + vsi->num_xdp_txq; + + wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); + } txq++; } for (q = 0; q < q_vector->num_ring_rx; q++) { + ice_write_itr(&q_vector->rx, 0); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); rxq++; } @@ -2242,40 +2627,30 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - int base = vsi->sw_base_vector; + int i; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; + if (!vsi->q_vectors || !vsi->irqs_ready) + return; - if (!vsi->q_vectors || !vsi->irqs_ready) - return; + ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; - ice_vsi_release_msix(vsi); - if (vsi->type == ICE_VSI_VF) - return; + vsi->irqs_ready = false; - vsi->irqs_ready = false; - for (i = 0; i < vsi->num_q_vectors; i++) { - u16 vector = i + base; - int irq_num; + ice_for_each_q_vector(vsi, i) { + int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = vsi->q_vectors[i]->irq.virq; - /* free only the irqs that were actually requested */ - if (!vsi->q_vectors[i] || - 
!(vsi->q_vectors[i]->num_ring_tx || - vsi->q_vectors[i]->num_ring_rx)) - continue; - - /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !(vsi->q_vectors[i]->num_ring_tx || + vsi->q_vectors[i]->num_ring_rx)) + continue; - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); - } + synchronize_irq(irq_num); + devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); } } @@ -2317,363 +2692,1366 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi) */ void ice_vsi_close(struct ice_vsi *vsi) { - if (!test_and_set_bit(__ICE_DOWN, vsi->state)) + if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) ice_down(vsi); + ice_vsi_clear_napi_queues(vsi); ice_vsi_free_irq(vsi); ice_vsi_free_tx_rings(vsi); ice_vsi_free_rx_rings(vsi); } /** - * ice_free_res - free a block of resources - * @res: pointer to the resource - * @index: starting index previously returned by ice_get_res - * @id: identifier to track owner + * ice_ena_vsi - resume a VSI + * @vsi: the VSI being resume + * @locked: is the rtnl_lock already held + */ +int ice_ena_vsi(struct ice_vsi *vsi, bool locked) +{ + int err = 0; + + if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) + return 0; + + clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); + + if (vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)) { + if (netif_running(vsi->netdev)) { + if (!locked) + rtnl_lock(); + + err = ice_open_internal(vsi->netdev); + + if (!locked) + rtnl_unlock(); + } + } else if (vsi->type == ICE_VSI_CTRL) { + err = ice_vsi_open_ctrl(vsi); + } + + return err; +} + +/** + * ice_dis_vsi - pause a VSI + * @vsi: the VSI being paused + * @locked: is the rtnl_lock already held + */ +void ice_dis_vsi(struct ice_vsi *vsi, bool locked) +{ + bool already_down = test_bit(ICE_VSI_DOWN, vsi->state); + + set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); + + if (vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)) { + if (netif_running(vsi->netdev)) { + if (!locked) + rtnl_lock(); + already_down = test_bit(ICE_VSI_DOWN, vsi->state); + if (!already_down) + ice_vsi_close(vsi); + + if (!locked) + rtnl_unlock(); + } else if (!already_down) { + ice_vsi_close(vsi); + } + } else if (vsi->type == ICE_VSI_CTRL && !already_down) { + ice_vsi_close(vsi); + } +} + +/** + * ice_vsi_set_napi_queues - associate netdev queues with napi + * @vsi: VSI pointer + * + * Associate queue[s] with napi for all vectors. + */ +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) +{ + struct net_device *netdev = vsi->netdev; + int q_idx, v_idx; + + if (!netdev) + return; + + ASSERT_RTNL(); + ice_for_each_rxq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, + &vsi->rx_rings[q_idx]->q_vector->napi); + + ice_for_each_txq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, + &vsi->tx_rings[q_idx]->q_vector->napi); + /* Also set the interrupt number for the NAPI */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); + } +} + +/** + * ice_vsi_clear_napi_queues - dissociate netdev queues from napi + * @vsi: VSI pointer * - * Returns number of resources freed + * Clear the association between all VSI queues queue[s] and napi. 
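Set and clear are deliberately symmetric: every queue/NAPI association made on the open path must be dropped on the close path so no queue is left pointing at a freed NAPI. A small userspace model of that pairing, with stand-in types for the netdev API:

#include <stdio.h>

struct napi { int irq; };		/* irq mirrors netif_napi_set_irq() */
struct queue { struct napi *napi; };

/* NULL dissociates, mirroring netif_queue_set_napi() */
static void queue_set_napi(struct queue *q, struct napi *n)
{
	q->napi = n;
}

int main(void)
{
	struct napi n = { .irq = 42 };
	struct queue rxq[2], txq[2];
	int i;

	for (i = 0; i < 2; i++) {	/* open path: associate */
		queue_set_napi(&rxq[i], &n);
		queue_set_napi(&txq[i], &n);
	}
	for (i = 0; i < 2; i++) {	/* close path: dissociate */
		queue_set_napi(&txq[i], NULL);
		queue_set_napi(&rxq[i], NULL);
	}
	printf("rxq[0].napi=%p\n", (void *)rxq[0].napi);	/* (nil) */
	return 0;
}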
*/ -int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) +void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) +{ + struct net_device *netdev = vsi->netdev; + int q_idx, v_idx; + + if (!netdev) + return; + + ASSERT_RTNL(); + /* Clear the NAPI's interrupt number */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + + netif_napi_set_irq(&q_vector->napi, -1); + } + + ice_for_each_txq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL); + + ice_for_each_rxq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL); +} + +/** + * ice_napi_add - register NAPI handler for the VSI + * @vsi: VSI for which NAPI handler is to be registered + * + * This function is only called in the driver's load path. Registering the NAPI + * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, + * reset/rebuild, etc.) + */ +void ice_napi_add(struct ice_vsi *vsi) +{ + int v_idx; + + if (!vsi->netdev) + return; + + ice_for_each_q_vector(vsi, v_idx) + netif_napi_add_config(vsi->netdev, + &vsi->q_vectors[v_idx]->napi, + ice_napi_poll, + v_idx); +} + +/** + * ice_vsi_release - Delete a VSI and free its resources + * @vsi: the VSI being removed + * + * Returns 0 on success or < 0 on error + */ +int ice_vsi_release(struct ice_vsi *vsi) +{ + struct ice_pf *pf; + + if (!vsi->back) + return -ENODEV; + pf = vsi->back; + + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_rss_clean(vsi); + + ice_vsi_close(vsi); + + /* The Rx rule will only exist to remove if the LLDP FW + * engine is currently stopped + */ + if (!ice_is_safe_mode(pf) && + !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) && + (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF && + ice_vf_is_lldp_ena(vsi->vf)))) + ice_vsi_cfg_sw_lldp(vsi, false, false); + + ice_vsi_decfg(vsi); + + /* retain SW VSI data structure since it is needed to unregister and + * free VSI netdev when PF is not in reset recovery pending state,\ + * for ex: during rmmod. + */ + if (!ice_is_reset_in_progress(pf->state)) + ice_vsi_delete(vsi); + + return 0; +} + +/** + * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors + * @vsi: VSI connected with q_vectors + * @coalesce: array of struct with stored coalesce + * + * Returns array size. 
+ */ +static int +ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi, + struct ice_coalesce_stored *coalesce) { - int count = 0; int i; - if (!res || index >= res->num_entries) - return -EINVAL; + ice_for_each_q_vector(vsi, i) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + coalesce[i].itr_tx = q_vector->tx.itr_settings; + coalesce[i].itr_rx = q_vector->rx.itr_settings; + coalesce[i].intrl = q_vector->intrl; - id |= ICE_RES_VALID_BIT; - for (i = index; i < res->num_entries && res->list[i] == id; i++) { - res->list[i] = 0; - count++; + if (i < vsi->num_txq) + coalesce[i].tx_valid = true; + if (i < vsi->num_rxq) + coalesce[i].rx_valid = true; } - return count; + return vsi->num_q_vectors; } /** - * ice_search_res - Search the tracker for a block of resources - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner + * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays + * @vsi: VSI connected with q_vectors + * @coalesce: pointer to array of struct with stored coalesce + * @size: size of coalesce array * - * Returns the base item index of the block, or -ENOMEM for error + * Before this function, ice_vsi_rebuild_get_coalesce should be called to save + * ITR params in arrays. If size is 0 or coalesce wasn't stored set coalesce + * to default value. */ -static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) +static void +ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, + struct ice_coalesce_stored *coalesce, int size) { - int start = res->search_hint; - int end = start; + struct ice_ring_container *rc; + int i; - if ((start + needed) > res->num_entries) - return -ENOMEM; + if ((size && !coalesce) || !vsi) + return; - id |= ICE_RES_VALID_BIT; + /* There are a couple of cases that have to be handled here: + * 1. The case where the number of queue vectors stays the same, but + * the number of Tx or Rx rings changes (the first for loop) + * 2. The case where the number of queue vectors increased (the + * second for loop) + */ + for (i = 0; i < size && i < vsi->num_q_vectors; i++) { + /* There are 2 cases to handle here and they are the same for + * both Tx and Rx: + * if the entry was valid previously (coalesce[i].[tr]x_valid + * and the loop variable is less than the number of rings + * allocated, then write the previous values + * + * if the entry was not valid previously, but the number of + * rings is less than are allocated (this means the number of + * rings increased from previously), then write out the + * values in the first element + * + * Also, always write the ITR, even if in ITR_IS_DYNAMIC + * as there is no harm because the dynamic algorithm + * will just overwrite. 
+ */ + if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { + rc = &vsi->q_vectors[i]->rx; + rc->itr_settings = coalesce[i].itr_rx; + ice_write_itr(rc, rc->itr_setting); + } else if (i < vsi->alloc_rxq) { + rc = &vsi->q_vectors[i]->rx; + rc->itr_settings = coalesce[0].itr_rx; + ice_write_itr(rc, rc->itr_setting); + } - do { - /* skip already allocated entries */ - if (res->list[end++] & ICE_RES_VALID_BIT) { - start = end; - if ((start + needed) > res->num_entries) - break; + if (i < vsi->alloc_txq && coalesce[i].tx_valid) { + rc = &vsi->q_vectors[i]->tx; + rc->itr_settings = coalesce[i].itr_tx; + ice_write_itr(rc, rc->itr_setting); + } else if (i < vsi->alloc_txq) { + rc = &vsi->q_vectors[i]->tx; + rc->itr_settings = coalesce[0].itr_tx; + ice_write_itr(rc, rc->itr_setting); } - if (end == (start + needed)) { - int i = start; + vsi->q_vectors[i]->intrl = coalesce[i].intrl; + ice_set_q_vector_intrl(vsi->q_vectors[i]); + } - /* there was enough, so assign it to the requestor */ - while (i != end) - res->list[i++] = id; + /* the number of queue vectors increased so write whatever is in + * the first element + */ + for (; i < vsi->num_q_vectors; i++) { + /* transmit */ + rc = &vsi->q_vectors[i]->tx; + rc->itr_settings = coalesce[0].itr_tx; + ice_write_itr(rc, rc->itr_setting); + + /* receive */ + rc = &vsi->q_vectors[i]->rx; + rc->itr_settings = coalesce[0].itr_rx; + ice_write_itr(rc, rc->itr_setting); - if (end == res->num_entries) - end = 0; + vsi->q_vectors[i]->intrl = coalesce[0].intrl; + ice_set_q_vector_intrl(vsi->q_vectors[i]); + } +} - res->search_hint = end; - return start; +/** + * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones + * @vsi: VSI pointer + */ +static int +ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) +{ + u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; + u16 req_rxq = vsi->req_rxq ? 
vsi->req_rxq : vsi->alloc_rxq; + struct ice_ring_stats **tx_ring_stats; + struct ice_ring_stats **rx_ring_stats; + struct ice_vsi_stats *vsi_stat; + struct ice_pf *pf = vsi->back; + u16 prev_txq = vsi->alloc_txq; + u16 prev_rxq = vsi->alloc_rxq; + int i; + + vsi_stat = pf->vsi_stats[vsi->idx]; + + if (req_txq < prev_txq) { + for (i = req_txq; i < prev_txq; i++) { + if (vsi_stat->tx_ring_stats[i]) { + kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); + WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); + } } - } while (1); + } - return -ENOMEM; + tx_ring_stats = vsi_stat->tx_ring_stats; + vsi_stat->tx_ring_stats = + krealloc_array(vsi_stat->tx_ring_stats, req_txq, + sizeof(*vsi_stat->tx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->tx_ring_stats) { + vsi_stat->tx_ring_stats = tx_ring_stats; + return -ENOMEM; + } + + if (req_rxq < prev_rxq) { + for (i = req_rxq; i < prev_rxq; i++) { + if (vsi_stat->rx_ring_stats[i]) { + kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); + WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); + } + } + } + + rx_ring_stats = vsi_stat->rx_ring_stats; + vsi_stat->rx_ring_stats = + krealloc_array(vsi_stat->rx_ring_stats, req_rxq, + sizeof(*vsi_stat->rx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->rx_ring_stats) { + vsi_stat->rx_ring_stats = rx_ring_stats; + return -ENOMEM; + } + + return 0; } /** - * ice_get_res - get a block of resources - * @pf: board private structure - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner + * ice_vsi_rebuild - Rebuild VSI after reset + * @vsi: VSI to be rebuild + * @vsi_flags: flags used for VSI rebuild flow + * + * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or + * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware. * - * Returns the base item index of the block, or -ENOMEM for error - * The search_hint trick and lack of advanced fit-finding only works - * because we're highly likely to have all the same sized requests. - * Linear search time and any fragmentation should be minimal. + * Returns 0 on success and negative value on failure */ -int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) +int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { + struct ice_coalesce_stored *coalesce; + int prev_num_q_vectors; + struct ice_pf *pf; int ret; - if (!res || !pf) + if (!vsi) return -EINVAL; - if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { - dev_err(&pf->pdev->dev, - "param err: needed=%d, num_entries = %d id=0x%04x\n", - needed, res->num_entries, id); + vsi->flags = vsi_flags; + pf = vsi->back; + if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; + + mutex_lock(&vsi->xdp_state_lock); + + ret = ice_vsi_realloc_stat_arrays(vsi); + if (ret) + goto unlock; + + ice_vsi_decfg(vsi); + ret = ice_vsi_cfg_def(vsi); + if (ret) + goto unlock; + + coalesce = kcalloc(vsi->num_q_vectors, + sizeof(struct ice_coalesce_stored), GFP_KERNEL); + if (!coalesce) { + ret = -ENOMEM; + goto decfg; } - /* search based on search_hint */ - ret = ice_search_res(res, needed, id); + prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); + + ret = ice_vsi_cfg_tc_lan(pf, vsi); + if (ret) { + if (vsi_flags & ICE_VSI_FLAG_INIT) { + ret = -EIO; + goto free_coalesce; + } - if (ret < 0) { - /* previous search failed. 
Reset search hint and try again */ - res->search_hint = 0; - ret = ice_search_res(res, needed, id); + ret = ice_schedule_reset(pf, ICE_RESET_PFR); + goto free_coalesce; } + ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); + clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); + +free_coalesce: + kfree(coalesce); +decfg: + if (ret) + ice_vsi_decfg(vsi); +unlock: + mutex_unlock(&vsi->xdp_state_lock); return ret; } /** - * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI - * @vsi: the VSI being un-configured + * ice_is_reset_in_progress - check for a reset in progress + * @state: PF state field + */ +bool ice_is_reset_in_progress(unsigned long *state) +{ + return test_bit(ICE_RESET_OICR_RECV, state) || + test_bit(ICE_PFR_REQ, state) || + test_bit(ICE_CORER_REQ, state) || + test_bit(ICE_GLOBR_REQ, state); +} + +/** + * ice_wait_for_reset - Wait for driver to finish reset and rebuild + * @pf: pointer to the PF structure + * @timeout: length of time to wait, in jiffies + * + * Wait (sleep) for a short time until the driver finishes cleaning up from + * a device reset. The caller must be able to sleep. Use this to delay + * operations that could fail while the driver is cleaning up after a device + * reset. + * + * Returns 0 on success, -EBUSY if the reset is not finished within the + * timeout, and -ERESTARTSYS if the thread was interrupted. + */ +int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) +{ + long ret; + + ret = wait_event_interruptible_timeout(pf->reset_wait_queue, + !ice_is_reset_in_progress(pf->state), + timeout); + if (ret < 0) + return ret; + else if (!ret) + return -EBUSY; + else + return 0; +} + +/** + * ice_vsi_update_q_map - update our copy of the VSI info with new queue map + * @vsi: VSI being configured + * @ctx: the context buffer returned from AQ VSI update command + */ +static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) +{ + vsi->info.mapping_flags = ctx->info.mapping_flags; + memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, + sizeof(vsi->info.q_mapping)); + memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, + sizeof(vsi->info.tc_mapping)); +} + +/** + * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration + * @vsi: the VSI being configured + * @ena_tc: TC map to be enabled */ -void ice_vsi_dis_irq(struct ice_vsi *vsi) +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { - int base = vsi->sw_base_vector; + struct net_device *netdev = vsi->netdev; struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - u32 val; + int numtc = vsi->tc_cfg.numtc; + struct ice_dcbx_cfg *dcbcfg; + u8 netdev_tc; int i; - /* disable interrupt causation from each queue */ - if (vsi->tx_rings) { - ice_for_each_txq(vsi, i) { - if (vsi->tx_rings[i]) { - u16 reg; + if (!netdev) + return; - reg = vsi->tx_rings[i]->reg_idx; - val = rd32(hw, QINT_TQCTL(reg)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(reg), val); - } - } + /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */ + if (vsi->type == ICE_VSI_CHNL) + return; + + if (!ena_tc) { + netdev_reset_tc(netdev); + return; } - if (vsi->rx_rings) { - ice_for_each_rxq(vsi, i) { - if (vsi->rx_rings[i]) { - u16 reg; + if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) + numtc = vsi->all_numtc; - reg = vsi->rx_rings[i]->reg_idx; - val = rd32(hw, QINT_RQCTL(reg)); - val &= ~QINT_RQCTL_CAUSE_ENA_M; - wr32(hw, QINT_RQCTL(reg), val); - } - } + if (netdev_set_num_tc(netdev, numtc)) + return; + + dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 
+ + ice_for_each_traffic_class(i) + if (vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + /* setup TC queue map for CHNL TCs */ + ice_for_each_chnl_tc(i) { + if (!(vsi->all_enatc & BIT(i))) + break; + if (!vsi->mqprio_qopt.qopt.count[i]) + break; + netdev_set_tc_queue(netdev, i, + vsi->mqprio_qopt.qopt.count[i], + vsi->mqprio_qopt.qopt.offset[i]); } - /* disable each interrupt */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - for (i = vsi->hw_base_vector; - i < (vsi->num_q_vectors + vsi->hw_base_vector); i++) - wr32(hw, GLINT_DYN_CTL(i), 0); + if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + return; + + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + u8 ets_tc = dcbcfg->etscfg.prio_table[i]; - ice_flush(hw); - for (i = 0; i < vsi->num_q_vectors; i++) - synchronize_irq(pf->msix_entries[i + base].vector); + /* Get the mapped netdev TC# for the UP */ + netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; + netdev_set_prio_tc_map(netdev, i, netdev_tc); } } /** - * ice_vsi_release - Delete a VSI and free its resources - * @vsi: the VSI being removed + * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config + * @vsi: the VSI being configured, + * @ctxt: VSI context structure + * @ena_tc: number of traffic classes to enable * - * Returns 0 on success or < 0 on error + * Prepares VSI tc_config to have queue configurations based on MQPRIO options. */ -int ice_vsi_release(struct ice_vsi *vsi) +static int +ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, + u8 ena_tc) { - struct ice_pf *pf; - struct ice_vf *vf; + u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; + u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; + int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; + u16 new_txq, new_rxq; + u8 netdev_tc = 0; + int i; - if (!vsi->back) - return -ENODEV; - pf = vsi->back; - vf = &pf->vf[vsi->vf_id]; - /* do not unregister and free netdevs while driver is in the reset - * recovery pending state. Since reset/rebuild happens through PF - * service task workqueue, its not a good idea to unregister netdev - * that is associated to the PF that is running the work queue items - * currently. This is done to avoid check_flush_dependency() warning - * on this wq - */ - if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { - ice_napi_del(vsi); - unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; + vsi->tc_cfg.ena_tc = ena_tc ? 
ena_tc : 1; + + pow = order_base_2(tc0_qcount); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); + + ice_for_each_traffic_class(i) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) { + /* TC is not enabled */ + vsi->tc_cfg.tc_info[i].qoffset = 0; + vsi->tc_cfg.tc_info[i].qcount_rx = 1; + vsi->tc_cfg.tc_info[i].qcount_tx = 1; + vsi->tc_cfg.tc_info[i].netdev_tc = 0; + ctxt->info.tc_mapping[i] = 0; + continue; + } + + offset = vsi->mqprio_qopt.qopt.offset[i]; + qcount_rx = vsi->mqprio_qopt.qopt.count[i]; + qcount_tx = vsi->mqprio_qopt.qopt.count[i]; + vsi->tc_cfg.tc_info[i].qoffset = offset; + vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; + vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; + vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; } - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_rss_clean(vsi); + if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { + ice_for_each_chnl_tc(i) { + if (!(vsi->all_enatc & BIT(i))) + continue; + offset = vsi->mqprio_qopt.qopt.offset[i]; + qcount_rx = vsi->mqprio_qopt.qopt.count[i]; + qcount_tx = vsi->mqprio_qopt.qopt.count[i]; + } + } - /* Disable VSI and free resources */ - ice_vsi_dis_irq(vsi); - ice_vsi_close(vsi); + new_txq = offset + qcount_tx; + if (new_txq > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + new_txq, vsi->alloc_txq); + return -EINVAL; + } - /* reclaim interrupt vectors back to PF */ - if (vsi->type != ICE_VSI_VF) { - /* reclaim SW interrupts back to the common pool */ - ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, - vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; - /* reclaim HW interrupts back to the common pool */ - ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, - vsi->idx); - pf->num_avail_hw_msix += vsi->num_q_vectors; - } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) { - /* Reclaim VF resources back only while freeing all VFs or - * vector reassignment is requested - */ - ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx, - vsi->idx); - pf->num_avail_hw_msix += pf->num_vf_msix; + new_rxq = offset + qcount_rx; + if (new_rxq > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + new_rxq, vsi->alloc_rxq); + return -EINVAL; } - ice_remove_vsi_fltr(&pf->hw, vsi->idx); - ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); - ice_vsi_delete(vsi); - ice_vsi_free_q_vectors(vsi); - ice_vsi_clear_rings(vsi); + /* Set actual Tx/Rx queue pairs */ + vsi->num_txq = new_txq; + vsi->num_rxq = new_rxq; - ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; + /* Setup queue TC[0].qmap for given VSI context */ + ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); + ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); + ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount); - /* retain SW VSI data structure since it is needed to unregister and - * free VSI netdev when PF is not in reset recovery pending state,\ - * for ex: during rmmod. 
+ /* Find queue count available for channel VSIs and starting offset + * for channel VSIs */ - if (!ice_is_reset_in_progress(pf->state)) - ice_vsi_clear(vsi); + if (tc0_qcount && tc0_qcount < vsi->num_rxq) { + vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount; + vsi->next_base_q = tc0_qcount; + } + dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); + dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); + dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", + vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); return 0; } /** - * ice_vsi_rebuild - Rebuild VSI after reset - * @vsi: VSI to be rebuild + * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map + * @vsi: VSI to be configured + * @ena_tc: TC bitmap * - * Returns 0 on success and negative value on failure + * VSI queues expected to be quiesced before calling this function */ -int ice_vsi_rebuild(struct ice_vsi *vsi) +int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - struct ice_pf *pf; - int ret, i; + struct ice_pf *pf = vsi->back; + struct ice_tc_cfg old_tc_cfg; + struct ice_vsi_ctx *ctx; + struct device *dev; + int i, ret = 0; + u8 num_tc = 0; + + dev = ice_pf_to_dev(pf); + if (vsi->tc_cfg.ena_tc == ena_tc && + vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) + return 0; + + ice_for_each_traffic_class(i) { + /* build bitmap of enabled TCs */ + if (ena_tc & BIT(i)) + num_tc++; + /* populate max_txqs per TC */ + max_txqs[i] = vsi->alloc_txq; + /* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are + * zero for CHNL VSI, hence use num_txq instead as max_txqs + */ + if (vsi->type == ICE_VSI_CHNL && + test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + max_txqs[i] = vsi->num_txq; + } + + memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); + vsi->tc_cfg.ena_tc = ena_tc; + vsi->tc_cfg.numtc = num_tc; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->vf_num = 0; + ctx->info = vsi->info; + + if (vsi->type == ICE_VSI_PF && + test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); + else + ret = ice_vsi_setup_q_map(vsi, ctx); + + if (ret) { + memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); + goto out; + } + + /* must to indicate which section of VSI context are being modified */ + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); + ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); + if (ret) { + dev_info(dev, "Failed VSI Update\n"); + goto out; + } + + if (vsi->type == ICE_VSI_PF && + test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); + else + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); + + if (ret) { + dev_err(dev, "VSI %d failed TC config, error %d\n", + vsi->vsi_num, ret); + goto out; + } + ice_vsi_update_q_map(vsi, ctx); + vsi->info.valid_sections = 0; + + ice_vsi_cfg_netdev_tc(vsi, ena_tc); +out: + kfree(ctx); + return ret; +} + +/** + * ice_update_ring_stats - Update ring statistics + * @stats: stats to be updated + * @pkts: number of processed packets + * @bytes: number of processed bytes + * + * This function assumes that caller has acquired a u64_stats_sync lock. 
+ */
+static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
+{
+	stats->bytes += bytes;
+	stats->pkts += pkts;
+}
+
+/**
+ * ice_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
+{
+	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
+	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
+	u64_stats_update_end(&tx_ring->ring_stats->syncp);
+}
+
+/**
+ * ice_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
+{
+	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
+	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
+	u64_stats_update_end(&rx_ring->ring_stats->syncp);
+}
+
+/**
+ * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
+ * @pi: port info of the switch with default VSI
+ *
+ * Return true if there is a single VSI in the default forwarding VSI list
+ */
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
+{
+	bool exists = false;
+
+	ice_check_if_dflt_vsi(pi, 0, &exists);
+	return exists;
+}
+
+/**
+ * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
+ * @vsi: VSI to compare against default forwarding VSI
+ *
+ * If the VSI passed in is the default forwarding VSI then return true, else
+ * return false
+ */
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
+{
+	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
+}
+
+/**
+ * ice_set_dflt_vsi - set the default forwarding VSI
+ * @vsi: VSI getting set as the default forwarding VSI on the switch
+ *
+ * If the VSI passed in is already the default VSI and it's enabled just return
+ * success.
+ *
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
+ */ +int ice_set_dflt_vsi(struct ice_vsi *vsi) +{ + struct device *dev; + int status; if (!vsi) return -EINVAL; - pf = vsi->back; - ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); - ice_vsi_free_q_vectors(vsi); - ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); - ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); - vsi->sw_base_vector = 0; - vsi->hw_base_vector = 0; - ice_vsi_clear_rings(vsi); - ice_vsi_free_arrays(vsi, false); - ice_dev_onetime_setup(&vsi->back->hw); - ice_vsi_set_num_qs(vsi); - ice_vsi_set_tc_cfg(vsi); + dev = ice_pf_to_dev(vsi->back); - /* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_init(vsi); - if (ret < 0) - goto err_vsi; + if (ice_lag_is_switchdev_running(vsi->back)) { + dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n", + vsi->vsi_num); + return 0; + } - ret = ice_vsi_alloc_arrays(vsi, false); - if (ret < 0) - goto err_vsi; + /* the VSI passed in is already the default VSI */ + if (ice_is_vsi_dflt_vsi(vsi)) { + dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", + vsi->vsi_num); + return 0; + } - switch (vsi->type) { - case ICE_VSI_PF: - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_rings; + status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX); + if (status) { + dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", + vsi->vsi_num, status); + return status; + } - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; + return 0; +} - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_vectors; +/** + * ice_clear_dflt_vsi - clear the default forwarding VSI + * @vsi: VSI to remove from filter list + * + * If the switch has no default VSI or it's not enabled then return error. + * + * Otherwise try to clear the default VSI and return the result. + */ +int ice_clear_dflt_vsi(struct ice_vsi *vsi) +{ + struct device *dev; + int status; - ice_vsi_map_rings_to_vectors(vsi); - /* Do not exit if configuring RSS had an issue, at least - * receive traffic on first queue. Hence no need to capture - * return value - */ - if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) - ice_vsi_cfg_rss_lut_key(vsi); - break; - case ICE_VSI_VF: - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_rings; + if (!vsi) + return -EINVAL; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; + dev = ice_pf_to_dev(vsi->back); - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_vectors; + /* there is no default VSI configured */ + if (!ice_is_dflt_vsi_in_use(vsi->port_info)) + return -ENODEV; + + status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false, + ICE_FLTR_RX); + if (status) { + dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", + vsi->vsi_num, status); + return -EIO; + } + + return 0; +} + +/** + * ice_get_link_speed_mbps - get link speed in Mbps + * @vsi: the VSI whose link speed is being queried + * + * Return current VSI link speed and 0 if the speed is unknown. + */ +int ice_get_link_speed_mbps(struct ice_vsi *vsi) +{ + unsigned int link_speed; + + link_speed = vsi->port_info->phy.link_info.link_speed; + + return (int)ice_get_link_speed(fls(link_speed) - 1); +} + +/** + * ice_get_link_speed_kbps - get link speed in Kbps + * @vsi: the VSI whose link speed is being queried + * + * Return current VSI link speed and 0 if the speed is unknown. 
+ */ +int ice_get_link_speed_kbps(struct ice_vsi *vsi) +{ + int speed_mbps; + + speed_mbps = ice_get_link_speed_mbps(vsi); + + return speed_mbps * 1000; +} + +/** + * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate + * @vsi: VSI to be configured + * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit + * + * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit + * profile, otherwise a non-zero value will force a minimum BW limit for the VSI + * on TC 0. + */ +int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + int status; + int speed; + + dev = ice_pf_to_dev(pf); + if (!vsi->port_info) { + dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", + vsi->idx, vsi->type); + return -EINVAL; + } + + speed = ice_get_link_speed_kbps(vsi); + if (min_tx_rate > (u64)speed) { + dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", + min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, + speed); + return -EINVAL; + } + + /* Configure min BW for VSI limit */ + if (min_tx_rate) { + status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, + ICE_MIN_BW, min_tx_rate); + if (status) { + dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", + min_tx_rate, ice_vsi_type_str(vsi->type), + vsi->idx); + return status; + } + + dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", + min_tx_rate, ice_vsi_type_str(vsi->type)); + } else { + status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, + vsi->idx, 0, + ICE_MIN_BW); + if (status) { + dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + return status; + } + + dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + } + + return 0; +} + +/** + * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate + * @vsi: VSI to be configured + * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit + * + * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit + * profile, otherwise a non-zero value will force a maximum BW limit for the VSI + * on TC 0. 
+ */ +int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + int status; + int speed; + + dev = ice_pf_to_dev(pf); + if (!vsi->port_info) { + dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n", + vsi->idx, vsi->type); + return -EINVAL; + } + + speed = ice_get_link_speed_kbps(vsi); + if (max_tx_rate > (u64)speed) { + dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n", + max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx, + speed); + return -EINVAL; + } + + /* Configure max BW for VSI limit */ + if (max_tx_rate) { + status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0, + ICE_MAX_BW, max_tx_rate); + if (status) { + dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", + max_tx_rate, ice_vsi_type_str(vsi->type), + vsi->idx); + return status; + } + + dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", + max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); + } else { + status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info, + vsi->idx, 0, + ICE_MAX_BW); + if (status) { + dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + return status; + } + + dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", + ice_vsi_type_str(vsi->type), vsi->idx); + } + + return 0; +} + +/** + * ice_set_link - turn on/off physical link + * @vsi: VSI to modify physical link on + * @ena: turn on/off physical link + */ +int ice_set_link(struct ice_vsi *vsi, bool ena) +{ + struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_port_info *pi = vsi->port_info; + struct ice_hw *hw = pi->hw; + int status; + + if (vsi->type != ICE_VSI_PF) + return -EINVAL; + + status = ice_aq_set_link_restart_an(pi, ena, NULL); + + /* if link is owned by manageability, FW will return LIBIE_AQ_RC_EMODE. + * this is not a fatal error, so print a warning message and return + * a success code. Return an error if FW returns an error code other + * than LIBIE_AQ_RC_EMODE + */ + if (status == -EIO) { + if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE) + dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", + (ena ? "ON" : "OFF"), status, + libie_aq_str(hw->adminq.sq_last_status)); + } else if (status) { + dev_err(dev, "can't set link to %s, err %d aq_err %s\n", + (ena ? "ON" : "OFF"), status, + libie_aq_str(hw->adminq.sq_last_status)); + return status; + } + + return 0; +} + +/** + * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI + * @vsi: VSI used to add VLAN filters + * + * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based + * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x888a8) doesn't + * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via + * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID. + * + * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic + * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged + * traffic in SVM, since the VLAN TPID isn't part of filtering. + * + * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be + * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is + * part of filtering. 
+ */
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
+{
+	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+	struct ice_vlan vlan;
+	int err;
+
+	vlan = ICE_VLAN(0, 0, 0);
+	err = vlan_ops->add_vlan(vsi, &vlan);
+	if (err && err != -EEXIST)
+		return err;
+
+	/* in SVM both VLAN 0 filters are identical */
+	if (!ice_is_dvm_ena(&vsi->back->hw))
+		return 0;
+
+	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+	err = vlan_ops->add_vlan(vsi, &vlan);
+	if (err && err != -EEXIST)
+		return err;
+
+	return 0;
+}
+
+/**
+ * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to delete VLAN filters
+ *
+ * Delete the VLAN 0 filters in the same manner that they were added in
+ * ice_vsi_add_vlan_zero.
+ */
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
+{
+	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+	struct ice_vlan vlan;
+	int err;
+
+	vlan = ICE_VLAN(0, 0, 0);
+	err = vlan_ops->del_vlan(vsi, &vlan);
+	if (err && err != -EEXIST)
+		return err;
+
+	/* in SVM both VLAN 0 filters are identical */
+	if (!ice_is_dvm_ena(&vsi->back->hw))
+		return 0;
+
+	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+	err = vlan_ops->del_vlan(vsi, &vlan);
+	if (err && err != -EEXIST)
+		return err;
 
-	vsi->back->q_left_tx -= vsi->alloc_txq;
-	vsi->back->q_left_rx -= vsi->alloc_rxq;
+	/* when deleting the last VLAN filter, make sure to disable the VLAN
+	 * promisc mode so the filter isn't left by accident
+	 */
+	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
+}
+
+/**
+ * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
+ * @vsi: VSI used to get the VLAN mode
+ *
+ * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
+ * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
+ */ +static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi) +{ +#define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2 +#define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1 + /* no VLAN 0 filter is created when a port VLAN is active */ + if (vsi->type == ICE_VSI_VF) { + if (WARN_ON(!vsi->vf)) + return 0; + + if (ice_vf_is_port_vlan_ena(vsi->vf)) + return 0; + } + + if (ice_is_dvm_ena(&vsi->back->hw)) + return ICE_DVM_NUM_ZERO_VLAN_FLTRS; + else + return ICE_SVM_NUM_ZERO_VLAN_FLTRS; +} + +/** + * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs + * @vsi: VSI used to determine if any non-zero VLANs have been added + */ +bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi) +{ + return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi)); +} + +/** + * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI + * @vsi: VSI used to get the number of non-zero VLANs added + */ +u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi) +{ + return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi)); +} + +/** + * ice_is_feature_supported + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to be checked + * + * returns true if feature is supported, false otherwise + */ +bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return false; + + return test_bit(f, pf->features); +} + +/** + * ice_set_feature_support + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to set + */ +void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return; + + set_bit(f, pf->features); +} + +/** + * ice_clear_feature_support + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to clear + */ +void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return; + + clear_bit(f, pf->features); +} + +/** + * ice_init_feature_support + * @pf: pointer to the struct ice_pf instance + * + * called during init to setup supported feature + */ +void ice_init_feature_support(struct ice_pf *pf) +{ + switch (pf->hw.device_id) { + case ICE_DEV_ID_E810C_BACKPLANE: + case ICE_DEV_ID_E810C_QSFP: + case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810_XXV_BACKPLANE: + case ICE_DEV_ID_E810_XXV_QSFP: + case ICE_DEV_ID_E810_XXV_SFP: + ice_set_feature_support(pf, ICE_F_DSCP); + if (ice_is_phy_rclk_in_netlist(&pf->hw)) + ice_set_feature_support(pf, ICE_F_PHY_RCLK); + /* If we don't own the timer - don't enable other caps */ + if (!ice_pf_src_tmr_owned(pf)) + break; + if (ice_is_cgu_in_netlist(&pf->hw)) + ice_set_feature_support(pf, ICE_F_CGU); + if (ice_is_clock_mux_in_netlist(&pf->hw)) + ice_set_feature_support(pf, ICE_F_SMA_CTRL); + if (ice_gnss_is_module_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_GNSS); break; default: break; } - /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; - - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "Failed VSI lan queue config\n"); - goto err_vectors; + if (pf->hw.mac_type == ICE_MAC_E830) { + ice_set_feature_support(pf, ICE_F_MBX_LIMIT); + ice_set_feature_support(pf, ICE_F_GCS); + ice_set_feature_support(pf, ICE_F_TXTIME); } +} + +/** + * ice_vsi_update_security - update security block in VSI + * @vsi: pointer to VSI structure + * @fill: function pointer to fill ctx + */ +int +ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) +{ + 
struct ice_vsi_ctx ctx = { 0 };
+
+	ctx.info = vsi->info;
+	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+	fill(&ctx);
+
+	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+		return -ENODEV;
+
+	vsi->info = ctx.info;
 	return 0;
+}
 
-err_vectors:
-	ice_vsi_free_q_vectors(vsi);
-err_rings:
-	if (vsi->netdev) {
-		vsi->current_netdev_flags = 0;
-		unregister_netdev(vsi->netdev);
-		free_netdev(vsi->netdev);
-		vsi->netdev = NULL;
-	}
-err_vsi:
-	ice_vsi_clear(vsi);
-	set_bit(__ICE_RESET_FAILED, vsi->back->state);
-	return ret;
+/**
+ * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
+{
+	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
 }
 
 /**
- * ice_is_reset_in_progress - check for a reset in progress
- * @state: pf state field
+ * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
  */
-bool ice_is_reset_in_progress(unsigned long *state)
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
+{
+	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
+			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
+ * @vsi: pointer to VSI structure
+ * @set: set or unset the bit
+ */
+int
+ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
 {
-	return test_bit(__ICE_RESET_OICR_RECV, state) ||
-	       test_bit(__ICE_PFR_REQ, state) ||
-	       test_bit(__ICE_CORER_REQ, state) ||
-	       test_bit(__ICE_GLOBR_REQ, state);
+	struct ice_vsi_ctx ctx = {
+		.info = vsi->info,
+	};
+
+	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+	if (set)
+		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+	else
+		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+
+	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+		return -ENODEV;
+
+	vsi->info = ctx.info;
+	return 0;
+}
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+	struct ice_hw *hw = &vsi->back->hw;
+	u32 l2tsel_bit;
+	int i;
+
+	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+		l2tsel_bit = 0;
+	else
+		l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+	for (i = 0; i < vsi->alloc_rxq; i++) {
+		u16 pfq = vsi->rxq_map[i];
+		u32 qrx_context_offset;
+		u32 regval;
+
+		qrx_context_offset =
+			QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+		regval = rd32(hw, qrx_context_offset);
+		regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+		regval |= l2tsel_bit;
+		wr32(hw, qrx_context_offset, regval);
+	}
+}
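
The subtlest logic in this patch is the coalesce save/restore around ice_vsi_rebuild(): ice_vsi_rebuild_get_coalesce() snapshots each vector's ITR settings, the rebuild may change the number of queue vectors, and ice_vsi_rebuild_set_coalesce() restores per-vector values where a vector existed before, cloning entry 0 into any vectors the rebuild added. The following is a minimal userspace C sketch of that pattern, not the driver's code; vec_cfg and saved_itr are hypothetical stand-ins for the driver's types.

/* Sketch of the snapshot/restore-with-fallback pattern used across a
 * rebuild that can change the vector count. Hypothetical names.
 */
#include <stdio.h>
#include <string.h>

struct vec_cfg {		/* stand-in for per-vector ITR state */
	unsigned int itr_tx;
	unsigned int itr_rx;
};

struct saved_itr {		/* stand-in for struct ice_coalesce_stored */
	unsigned int itr_tx;
	unsigned int itr_rx;
};

/* Snapshot current settings; returns the number of entries saved */
static int save_coalesce(const struct vec_cfg *vecs, int n, struct saved_itr *out)
{
	for (int i = 0; i < n; i++) {
		out[i].itr_tx = vecs[i].itr_tx;
		out[i].itr_rx = vecs[i].itr_rx;
	}
	return n;
}

/* Restore per-vector values where a vector existed before the rebuild,
 * and fall back to entry 0 for vectors the rebuild added (entry 0 always
 * exists since at least one vector was allocated before).
 */
static void restore_coalesce(struct vec_cfg *vecs, int n_new,
			     const struct saved_itr *saved, int n_saved)
{
	int i;

	for (i = 0; i < n_new && i < n_saved; i++) {
		vecs[i].itr_tx = saved[i].itr_tx;
		vecs[i].itr_rx = saved[i].itr_rx;
	}
	for (; i < n_new; i++) {	/* vector count grew: clone entry 0 */
		vecs[i].itr_tx = saved[0].itr_tx;
		vecs[i].itr_rx = saved[0].itr_rx;
	}
}

int main(void)
{
	struct vec_cfg before[2] = { { 100, 50 }, { 200, 80 } };
	struct saved_itr saved[2];
	int n_saved = save_coalesce(before, 2, saved);
	struct vec_cfg after[4];	/* pretend the rebuild grew 2 -> 4 */

	memset(after, 0, sizeof(after));
	restore_coalesce(after, 4, saved, n_saved);

	for (int i = 0; i < 4; i++)
		printf("vec %d: tx=%u rx=%u\n", i, after[i].itr_tx, after[i].itr_rx);
	return 0;
}

Vectors 2 and 3 end up with vector 0's settings, which is exactly the behavior the long comment in ice_vsi_rebuild_set_coalesce() describes.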
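ice_vsi_realloc_stat_arrays() uses a resize idiom worth noting: free the per-ring entries beyond the new size first, then resize the pointer array with krealloc_array(..., __GFP_ZERO), and on allocation failure fall back to the old pointer so nothing is leaked or lost. A small userspace sketch of the same idiom, assuming plain realloc()/free() in place of krealloc_array() and kfree_rcu(); all names are illustrative.

#include <stdlib.h>
#include <string.h>

struct ring_stats { unsigned long pkts, bytes; };

static int resize_stats(struct ring_stats ***arr_p, int prev_n, int req_n)
{
	struct ring_stats **arr = *arr_p;
	struct ring_stats **tmp;

	/* drop per-ring entries a smaller queue count no longer needs
	 * (the driver defers these frees with kfree_rcu())
	 */
	for (int i = req_n; i < prev_n; i++) {
		free(arr[i]);
		arr[i] = NULL;
	}

	tmp = realloc(arr, req_n * sizeof(*arr));
	if (!tmp)
		return -1;	/* keep the old array; it is still valid */

	/* zero any tail added by growth, mirroring __GFP_ZERO */
	if (req_n > prev_n)
		memset(tmp + prev_n, 0, (req_n - prev_n) * sizeof(*tmp));

	*arr_p = tmp;
	return 0;
}

int main(void)
{
	int n = 4;
	struct ring_stats **arr = calloc(n, sizeof(*arr));

	for (int i = 0; i < n; i++)
		arr[i] = calloc(1, sizeof(**arr));

	if (resize_stats(&arr, n, 2) == 0)	/* shrink 4 -> 2 */
		n = 2;
	if (resize_stats(&arr, n, 6) == 0)	/* grow 2 -> 6, tail zeroed */
		n = 6;

	for (int i = 0; i < n; i++)
		free(arr[i]);			/* free(NULL) is a no-op */
	free(arr);
	return 0;
}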
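Finally, ice_vsi_update_l2tsel() is a per-queue read-modify-write of a single Rx queue context bit: read the register, clear the l2tsel field, OR in the requested value, write it back. A generic sketch of that RMW step, with an array standing in for the driver's MMIO accessors rd32()/wr32(); the bit position here is illustrative, not the hardware's.

#include <stdint.h>
#include <stdio.h>

#define L2TSEL_BIT 11			/* illustrative bit position */

static uint32_t regs[4];		/* fake register file */

static void set_l2tsel(int q, int enable)
{
	uint32_t val = regs[q];		/* rd32() */

	val &= ~(UINT32_C(1) << L2TSEL_BIT);	  /* clear the field */
	if (enable)
		val |= UINT32_C(1) << L2TSEL_BIT; /* set requested value */
	regs[q] = val;			/* wr32() */
}

int main(void)
{
	for (int q = 0; q < 4; q++)
		set_l2tsel(q, q & 1);
	for (int q = 0; q < 4; q++)
		printf("q%d: 0x%08x\n", q, regs[q]);
	return 0;
}

Clearing before setting makes the write idempotent, which is why the driver can apply the same update unconditionally to every Rx queue of the VSI.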
