Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c')
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 676
1 file changed, 408 insertions, 268 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index be59ba3774e1..8b30a3accd31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2,6 +2,8 @@
 /* Copyright(c) 2013 - 2018 Intel Corporation. */
 
 #include "i40e.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_virtchnl_pf.h"
 
 /*********************notification routines***********************/
 
@@ -152,6 +154,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 			       (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
 }
 
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+	u16 vf_id;
+	u16 pos;
+
+	/* Continue only if this is a PF */
+	if (!pdev->is_physfn)
+		return;
+
+	if (!pci_num_vf(pdev))
+		return;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (pos) {
+		struct pci_dev *vf_dev = NULL;
+
+		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
+			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
+				pci_restore_msi_state(vf_dev);
+		}
+	}
+}
+#endif /* CONFIG_PCI_IOV */
+
 /**
  * i40e_vc_notify_vf_reset
  * @vf: pointer to the VF structure
@@ -188,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  * @notify_vf: notify vf about reset or not
  * Reset VF handler.
  **/
-static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
 {
 	struct i40e_pf *pf = vf->pf;
 	int i;
@@ -420,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
-		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+		      FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
 		wr32(hw, reg_idx, reg);
 	}
 
@@ -463,8 +491,6 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
 		u32 v_idx, reg_idx, reg;
 
 		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
 		v_idx = qv_info->v_idx;
 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 			/* Figure out the queue after CEQ and make that the
@@ -472,10 +498,10 @@
 			 */
 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
-			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
-					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
-			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
-					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+			next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
+						 reg);
+			next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
+						 reg);
 
 			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 			reg = (next_q_index &
@@ -506,6 +532,7 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
 	struct virtchnl_rdma_qv_info *qv_info;
 	u32 v_idx, i, reg_idx, reg;
 	u32 next_q_idx, next_q_type;
+	size_t size;
 	u32 msix_vf;
 	int ret = 0;
 
@@ -521,9 +548,9 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
 	}
 
 	kfree(vf->qvlist_info);
-	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
-					      qvlist_info->num_vectors - 1),
-				  GFP_KERNEL);
+	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
+				    qvlist_info->num_vectors);
+	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
 	if (!vf->qvlist_info) {
 		ret = -ENOMEM;
 		goto err_out;
@@ -533,8 +560,6 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 	for (i = 0; i < qvlist_info->num_vectors; i++) {
 		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
 
 		/* Validate vector id belongs to this vf */
 		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
@@ -552,10 +577,10 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
 		 * queue on top. Also link it with the new queue in CEQCTL.
 		 */
 		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
-		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
-			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
-		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
-			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+		next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
+				       reg);
+		next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
+					reg);
 
 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
@@ -628,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 
 	/* only set the required fields */
 	tx_ctx.base = info->dma_ring_addr / 128;
+
+	/* ring_len has to be multiple of 8 */
+	if (!IS_ALIGNED(info->ring_len, 8) ||
+	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+		ret = -EINVAL;
+		goto error_context;
+	}
 	tx_ctx.qlen = info->ring_len;
 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
 	tx_ctx.rdylist_act = 0;
@@ -656,11 +688,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 
 	/* associate this queue with the PCI VF function */
 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
-	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
-		    & I40E_QTX_CTL_PF_INDX_MASK);
-	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
-		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
-		    & I40E_QTX_CTL_VFVM_INDX_MASK);
+	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
+	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+			      vf->vf_id + hw->func_caps.vf_base_id);
 	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
 	i40e_flush(hw);
 
@@ -693,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 
 	/* only set the required fields */
 	rx_ctx.base = info->dma_ring_addr / 128;
+
+	/* ring_len has to be multiple of 32 */
+	if (!IS_ALIGNED(info->ring_len, 32) ||
+	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+		ret = -EINVAL;
+		goto error_param;
+	}
 	rx_ctx.qlen = info->ring_len;
 
 	if (info->splithdr_enabled) {
@@ -772,13 +809,13 @@ error_param:
 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
 {
 	struct i40e_mac_filter *f = NULL;
+	struct i40e_vsi *main_vsi, *vsi;
 	struct i40e_pf *pf = vf->pf;
-	struct i40e_vsi *vsi;
 	u64 max_tx_rate = 0;
 	int ret = 0;
 
-	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
-			     vf->vf_id);
+	main_vsi = i40e_pf_get_main_vsi(pf);
+	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id);
 
 	if (!vsi) {
 		dev_err(&pf->pdev->dev,
@@ -789,7 +826,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
 	}
 
 	if (!idx) {
-		u64 hena = i40e_pf_get_default_rss_hena(pf);
+		u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
 		u8 broadcast[ETH_ALEN];
 
 		vf->lan_vsi_idx = vsi->idx;
@@ -818,8 +855,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
 			dev_info(&pf->pdev->dev,
 				 "Could not allocate VF broadcast filter\n");
 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
-		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
-		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg);
+		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+		     (u32)(hashcfg >> 32));
 		/* program mac filter only for VF VSI */
 		ret = i40e_sync_vsi_filters(vsi);
 		if (ret)
@@ -1266,9 +1304,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
 			dev_err(&pf->pdev->dev,
 				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
-				vf->vf_id,
-				ERR_PTR(aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
+				vf->vf_id, ERR_PTR(aq_ret),
+				libie_aq_str(aq_err));
 
 			return aq_ret;
 		}
@@ -1282,9 +1319,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
 			dev_err(&pf->pdev->dev,
 				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
-				vf->vf_id,
-				ERR_PTR(aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
+				vf->vf_id, ERR_PTR(aq_ret),
+				libie_aq_str(aq_err));
 		}
 
 		return aq_ret;
@@ -1299,9 +1335,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
 			dev_err(&pf->pdev->dev,
 				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
-				vf->vf_id,
-				ERR_PTR(aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
+				vf->vf_id, ERR_PTR(aq_ret),
+				libie_aq_str(aq_err));
 
 			if (!aq_tmp)
 				aq_tmp = aq_ret;
@@ -1315,9 +1350,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
 			dev_err(&pf->pdev->dev,
 				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
-				vf->vf_id,
-				ERR_PTR(aq_ret),
-				i40e_aq_str(&pf->hw, aq_err));
+				vf->vf_id, ERR_PTR(aq_ret),
+				libie_aq_str(aq_err));
 
 			if (!aq_tmp)
 				aq_tmp = aq_ret;
@@ -1346,14 +1380,14 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
 					   bool alluni)
 {
 	struct i40e_pf *pf = vf->pf;
-	int aq_ret = I40E_SUCCESS;
 	struct i40e_vsi *vsi;
+	int aq_ret = 0;
 	u16 num_vlans;
 	s16 *vl;
 
 	vsi = i40e_find_vsi_from_id(pf, vsi_id);
 	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	if (vf->port_vlan_id) {
 		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
@@ -1363,7 +1397,7 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
 
 	i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
 	if (!vl)
-		return I40E_ERR_NO_MEMORY;
+		return -ENOMEM;
 
 	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
 				      vl, num_vlans);
@@ -1430,6 +1464,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
 	 * functions that may still be running at this point.
 	 */
 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+	clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
 
 	/* In the case of a VFLR, the HW has already reset the VF and we
 	 * just need to clean up, so don't hit the VFRTRIG register.
@@ -1523,8 +1558,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
  * @vf: pointer to the VF structure
  * @flr: VFLR was issued or not
  *
- * Returns true if the VF is in reset, resets successfully, or resets
- * are disabled and false otherwise.
+ * Return: True if reset was performed successfully or if resets are disabled.
+ * False if reset is already in progress.
 **/
 bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
 {
@@ -1543,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
 
 	/* If VF is being reset already we don't need to continue. */
 	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
-		return true;
+		return false;
 
 	i40e_trigger_vf_reset(vf, flr);
 
@@ -1601,8 +1636,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 {
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_vf *vf;
-	int i, v;
 	u32 reg;
+	int i;
 
 	/* If we don't have any VFs, then there is nothing to reset */
 	if (!pf->num_alloc_vfs)
@@ -1613,11 +1648,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 		return false;
 
 	/* Begin reset on all VFs at once */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
-		vf = &pf->vf[v];
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* If VF is being reset no need to trigger reset again */
 		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
-			i40e_trigger_vf_reset(&pf->vf[v], flr);
+			i40e_trigger_vf_reset(vf, flr);
 	}
 
 	/* HW requires some time to make sure it can flush the FIFO for a VF
@@ -1626,14 +1660,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 	 * the VFs using a simple iterator that increments once that VF has
 	 * finished resetting.
	 */
-	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+	for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
 		usleep_range(10000, 20000);
 
 		/* Check each VF in sequence, beginning with the VF to fail
 		 * the previous check.
 		 */
-		while (v < pf->num_alloc_vfs) {
-			vf = &pf->vf[v];
+		while (vf < &pf->vf[pf->num_alloc_vfs]) {
 			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
 				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
 				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@@ -1643,7 +1676,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 			/* If the current VF has finished resetting, move on
 			 * to the next VF in sequence.
 			 */
-			v++;
+			++vf;
 		}
 	}
 
@@ -1653,39 +1686,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 	/* Display a warning if at least one VF didn't manage to reset in
 	 * time, but continue on with the operation.
 	 */
-	if (v < pf->num_alloc_vfs)
+	if (vf < &pf->vf[pf->num_alloc_vfs])
 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
-			pf->vf[v].vf_id);
+			vf->vf_id);
 	usleep_range(10000, 20000);
 
 	/* Begin disabling all the rings associated with VFs, but do not wait
 	 * between each VF.
 	 */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* On initial reset, we don't have any queues to disable */
-		if (pf->vf[v].lan_vsi_idx == 0)
+		if (vf->lan_vsi_idx == 0)
 			continue;
 
 		/* If VF is reset in another thread just continue */
 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
 			continue;
 
-		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+		i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
 	}
 
 	/* Now that we've notified HW to disable all of the VF rings, wait
 	 * until they finish.
 	 */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* On initial reset, we don't have any queues to disable */
-		if (pf->vf[v].lan_vsi_idx == 0)
+		if (vf->lan_vsi_idx == 0)
 			continue;
 
 		/* If VF is reset in another thread just continue */
 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
 			continue;
 
-		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+		i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
 	}
 
 	/* Hw may need up to 50ms to finish disabling the RX queues. We
@@ -1694,12 +1727,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 	mdelay(50);
 
 	/* Finish the reset on each VF */
-	for (v = 0; v < pf->num_alloc_vfs; v++) {
+	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
 		/* If VF is reset in another thread just continue */
 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
 			continue;
 
-		i40e_cleanup_reset_vf(&pf->vf[v]);
+		i40e_cleanup_reset_vf(vf);
 	}
 
 	i40e_flush(hw);
@@ -1805,7 +1838,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 		if (ret) {
-			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+			clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
 			pf->num_alloc_vfs = 0;
 			goto err_iov;
 		}
@@ -1916,8 +1949,8 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	}
 
 	if (num_vfs) {
-		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
-			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+		if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+			set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
 			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
 		}
 		ret = i40e_pci_sriov_enable(pdev, num_vfs);
@@ -1926,7 +1959,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 
 	if (!pci_vfs_assigned(pf->pdev)) {
 		i40e_free_vfs(pf);
-		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+		clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
 		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
 	} else {
 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
@@ -2037,7 +2070,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 	if (VF_IS_V10(&vf->vf_ver))
 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
-				      I40E_SUCCESS, (u8 *)&info,
+				      0, (u8 *)&info,
 				      sizeof(struct virtchnl_version_info));
 }
 
@@ -2098,15 +2131,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 	size_t len = 0;
 	int ret;
 
-	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
-		aq_ret = I40E_ERR_PARAM;
+	i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
+
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
+	    test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
-	len = struct_size(vfres, vsi_res, num_vsis);
+	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
 	vfres = kzalloc(len, GFP_KERNEL);
 	if (!vfres) {
-		aq_ret = I40E_ERR_NO_MEMORY;
+		aq_ret = -ENOMEM;
 		len = 0;
 		goto err;
 	}
@@ -2134,14 +2170,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
 	} else {
-		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
+		if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
 		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
 		else
 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
 	}
 
-	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+	if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) {
 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
 			vfres->vf_cap_flags |=
 				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
@@ -2150,22 +2186,22 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
 
-	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
+	if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) &&
 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
 
 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
-		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+		if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
 			dev_err(&pf->pdev->dev,
 				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
 				 vf->vf_id);
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto err;
 		}
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
 	}
 
-	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
+	if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) {
 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
 			vfres->vf_cap_flags |=
 					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
@@ -2192,13 +2228,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 		vfres->vsi_res[0].qset_handle
 					  = le16_to_cpu(vsi->info.qs_handle[0]);
 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+			spin_lock_bh(&vsi->mac_filter_hash_lock);
 			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
 			eth_zero_addr(vf->default_lan_addr.addr);
+			spin_unlock_bh(&vsi->mac_filter_hash_lock);
 		}
 		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
 				vf->default_lan_addr.addr);
 	}
 	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+	set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
 
 err:
 	/* send the response back to the VF */
@@ -2227,7 +2266,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
 	int aq_ret = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err_out;
 	}
 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
@@ -2243,12 +2282,12 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
 	}
 
 	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err_out;
 	}
 
 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err_out;
 	}
 
@@ -2315,17 +2354,17 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 	int aq_ret = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
@@ -2333,7 +2372,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 		for (i = 0; i < vf->num_tc; i++)
 			num_qps_all += vf->ch[i].num_qps;
 		if (num_qps_all != qci->num_queue_pairs) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto error_param;
 		}
 	}
@@ -2346,7 +2385,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 		if (!vf->adq_enabled) {
 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
 						      qpi->txq.queue_id)) {
-				aq_ret = I40E_ERR_PARAM;
+				aq_ret = -EINVAL;
 				goto error_param;
 			}
 
@@ -2355,14 +2394,14 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 			if (qpi->txq.vsi_id != qci->vsi_id ||
 			    qpi->rxq.vsi_id != qci->vsi_id ||
 			    qpi->rxq.queue_id != vsi_queue_id) {
-				aq_ret = I40E_ERR_PARAM;
+				aq_ret = -EINVAL;
 				goto error_param;
 			}
 		}
 
 		if (vf->adq_enabled) {
-			if (idx >= ARRAY_SIZE(vf->ch)) {
-				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+			if (idx >= vf->num_tc) {
+				aq_ret = -ENODEV;
 				goto error_param;
 			}
 			vsi_id = vf->ch[idx].vsi_id;
@@ -2372,7 +2411,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 					     &qpi->rxq) ||
 		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
 					     &qpi->txq)) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto error_param;
 		}
 
@@ -2382,8 +2421,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 		 * to its appropriate VSIs based on TC mapping
 		 */
 		if (vf->adq_enabled) {
-			if (idx >= ARRAY_SIZE(vf->ch)) {
-				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+			if (idx >= vf->num_tc) {
+				aq_ret = -ENODEV;
 				goto error_param;
 			}
 			if (j == (vf->ch[idx].num_qps - 1)) {
@@ -2406,7 +2445,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 			vsi->num_queue_pairs = vf->ch[i].num_qps;
 
 			if (i40e_update_adq_vsi_queues(vsi, i)) {
-				aq_ret = I40E_ERR_CONFIG;
+				aq_ret = -EIO;
 				goto error_param;
 			}
 		}
@@ -2432,8 +2471,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
 	u16 vsi_queue_id, queue_id;
 
 	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
-		if (vf->adq_enabled) {
-			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+		u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
+
+		if (vf->adq_enabled && idx < vf->num_tc) {
+			vsi_id = vf->ch[idx].vsi_id;
 			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
 		} else {
 			queue_id = vsi_queue_id;
@@ -2464,13 +2505,13 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
 	int i;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (irqmap_info->num_vectors >
 	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
@@ -2479,18 +2520,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
 		/* validate msg params */
 		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
 		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto error_param;
 		}
 		vsi_id = map->vsi_id;
 
 		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto error_param;
 		}
 
 		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto error_param;
 		}
 
@@ -2578,30 +2619,38 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
 	int aq_ret = 0;
 	int i;
 
+	if (vf->is_disabled_from_host) {
+		aq_ret = -EPERM;
+		dev_info(&pf->pdev->dev,
+			 "Admin has disabled VF %d, will not enable queues\n",
+			 vf->vf_id);
+		goto error_param;
+	}
+
 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	/* Use the queue bit map sent by the VF */
 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
 				  true)) {
-		aq_ret = I40E_ERR_TIMEOUT;
+		aq_ret = -EIO;
 		goto error_param;
 	}
 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
 				  true)) {
-		aq_ret = I40E_ERR_TIMEOUT;
+		aq_ret = -EIO;
 		goto error_param;
 	}
 
@@ -2610,7 +2659,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
 		/* zero belongs to LAN VSI */
 		for (i = 1; i < vf->num_tc; i++) {
 			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
-				aq_ret = I40E_ERR_TIMEOUT;
+				aq_ret = -EIO;
 		}
 	}
 
@@ -2636,29 +2685,29 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
 	int aq_ret = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	/* Use the queue bit map sent by the VF */
 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
 				  false)) {
-		aq_ret = I40E_ERR_TIMEOUT;
+		aq_ret = -EIO;
 		goto error_param;
 	}
 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
 				  false)) {
-		aq_ret = I40E_ERR_TIMEOUT;
+		aq_ret = -EIO;
 		goto error_param;
 	}
 error_param:
@@ -2790,18 +2839,18 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!vsi) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 	i40e_update_eth_stats(vsi);
@@ -2851,8 +2900,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
 	struct i40e_hw *hw = &pf->hw;
-	int mac2add_cnt = 0;
-	int i;
+	int i, mac_add_max, mac_add_cnt = 0;
+	bool vf_trusted;
+
+	vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
 	for (i = 0; i < al->num_elements; i++) {
 		struct i40e_mac_filter *f;
@@ -2862,7 +2913,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 		    is_zero_ether_addr(addr)) {
 			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
 				addr);
-			return I40E_ERR_INVALID_MAC_ADDR;
+			return -EINVAL;
 		}
 
 		/* If the host VMM administrator has set the VF MAC address
@@ -2872,9 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 		 * The VF may request to set the MAC address filter already
 		 * assigned to it so do not return an error in that case.
 		 */
-		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
-		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
-		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+		if (!vf_trusted && !is_multicast_ether_addr(addr) &&
+		    vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
 			dev_err(&pf->pdev->dev,
 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
 			return -EPERM;
@@ -2883,31 +2933,50 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
 		/*count filters that really will be added*/
 		f = i40e_find_mac(vsi, addr);
 		if (!f)
-			++mac2add_cnt;
+			++mac_add_cnt;
 	}
 
+	/* Determine the maximum number of MAC addresses this VF may use.
+	 *
+	 * - For untrusted VFs: use a fixed small limit.
+	 *
+	 * - For trusted VFs: limit is calculated by dividing total MAC
+	 *   filter pool across all VFs/ports.
+	 *
+	 * - User can override this by devlink param "max_mac_per_vf".
+	 *   If set its value is used as a strict cap for both trusted and
+	 *   untrusted VFs.
+	 * Note:
+	 *   even when overridden, this is a theoretical maximum; hardware
+	 *   may reject additional MACs if the absolute HW limit is reached.
	 */
+	if (!vf_trusted)
+		mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
+	else
+		mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
+
+	if (pf->max_mac_per_vf > 0)
+		mac_add_max = pf->max_mac_per_vf;
 
-	/* If this VF is not privileged, then we can't add more than a limited
-	 * number of addresses. Check to make sure that the additions do not
-	 * push us over the limit.
+	/* VF can replace all its filters in one step, in this case mac_add_max
+	 * will be added as active and another mac_add_max will be in
+	 * a to-be-removed state. Account for that.
	 */
-	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-		if ((i40e_count_filters(vsi) + mac2add_cnt) >
-		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
+	if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
+	    (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
+		if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) {
 			dev_err(&pf->pdev->dev,
-				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+				"Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n",
+				mac_add_max);
 			return -EPERM;
 		}
-	/* If this VF is trusted, it can use more resources than untrusted.
-	 * However to ensure that every trusted VF has appropriate number of
-	 * resources, divide whole pool of resources per port and then across
-	 * all VFs.
-	 */
-	} else {
-		if ((i40e_count_filters(vsi) + mac2add_cnt) >
-		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
-						       hw->num_ports)) {
+		if (!vf_trusted) {
 			dev_err(&pf->pdev->dev,
-				"Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+			return -EPERM;
+		} else {
+			dev_err(&pf->pdev->dev,
+				"Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n",
+				mac_add_max);
 			return -EPERM;
 		}
 	}
@@ -2998,7 +3067,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
-		ret = I40E_ERR_PARAM;
+		ret = -EINVAL;
 		goto error_param;
 	}
 
@@ -3027,7 +3096,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 				dev_err(&pf->pdev->dev,
 					"Unable to add MAC filter %pM for VF %d\n",
 					al->list[i].addr, vf->vf_id);
-				ret = I40E_ERR_PARAM;
+				ret = -EINVAL;
 				spin_unlock_bh(&vsi->mac_filter_hash_lock);
 				goto error_param;
 			}
@@ -3067,7 +3136,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
-		ret = I40E_ERR_PARAM;
+		ret = -EINVAL;
 		goto error_param;
 	}
 
@@ -3076,22 +3145,33 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
 		    is_zero_ether_addr(al->list[i].addr)) {
 			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
 				al->list[i].addr, vf->vf_id);
-			ret = I40E_ERR_INVALID_MAC_ADDR;
+			ret = -EINVAL;
 			goto error_param;
 		}
-		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
-			was_unimac_deleted = true;
 	}
 	vsi = pf->vsi[vf->lan_vsi_idx];
 
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 	/* delete addresses from the list */
-	for (i = 0; i < al->num_elements; i++)
+	for (i = 0; i < al->num_elements; i++) {
+		const u8 *addr = al->list[i].addr;
+
+		/* Allow to delete VF primary MAC only if it was not set
+		 * administratively by PF.
+		 */
+		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+			if (!vf->pf_set_mac)
+				was_unimac_deleted = true;
+			else
+				continue;
+		}
+
 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
-			ret = I40E_ERR_INVALID_MAC_ADDR;
+			ret = -EINVAL;
 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
 			goto error_param;
 		}
+	}
 
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
@@ -3149,13 +3229,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
 	}
 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	for (i = 0; i < vfl->num_elements; i++) {
 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			dev_err(&pf->pdev->dev,
 				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
 			goto error_param;
@@ -3163,7 +3243,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
 	}
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (vsi->info.pvid) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
@@ -3214,13 +3294,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	for (i = 0; i < vfl->num_elements; i++) {
 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto error_param;
 		}
 	}
@@ -3228,7 +3308,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (vsi->info.pvid) {
 		if (vfl->num_elements > 1 || vfl->vlan_id[0])
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 		goto error_param;
 	}
 
@@ -3264,17 +3344,19 @@ error_param:
 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 {
 	struct i40e_pf *pf = vf->pf;
-	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+	struct i40e_vsi *main_vsi;
 	int aq_ret = 0;
+	int abs_vf_id;
 
 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
 	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
-	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
-				     msg, msglen);
+	main_vsi = i40e_pf_get_main_vsi(pf);
+	abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+	i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen);
 
 error_param:
 	/* send the response to the VF */
@@ -3298,13 +3380,13 @@ static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
 
 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
 	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto error_param;
 	}
 
 	if (config) {
 		if (i40e_config_rdma_qvlist(vf, qvlist_info))
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 	} else {
 		i40e_release_rdma_qvlist(vf);
 	}
@@ -3335,7 +3417,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
 	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3366,13 +3448,13 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
 	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
 	for (i = 0; i < vrl->lut_entries; i++)
 		if (vrl->lut[i] >= vf->num_queue_pairs) {
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto err;
 		}
 
@@ -3385,66 +3467,67 @@ err:
 }
 
 /**
- * i40e_vc_get_rss_hena
+ * i40e_vc_get_rss_hashcfg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
 *
- * Return the RSS HENA bits allowed by the hardware
+ * Return the RSS Hash configuration bits allowed by the hardware
 **/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
 {
-	struct virtchnl_rss_hena *vrh = NULL;
+	struct virtchnl_rss_hashcfg *vrh = NULL;
 	struct i40e_pf *pf = vf->pf;
 	int aq_ret = 0;
 	int len = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
-	len = sizeof(struct virtchnl_rss_hena);
+	len = sizeof(struct virtchnl_rss_hashcfg);
 	vrh = kzalloc(len, GFP_KERNEL);
 	if (!vrh) {
-		aq_ret = I40E_ERR_NO_MEMORY;
+		aq_ret = -ENOMEM;
 		len = 0;
 		goto err;
 	}
-	vrh->hena = i40e_pf_get_default_rss_hena(pf);
+	vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
 err:
 	/* send the response back to the VF */
-	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS,
 					aq_ret, (u8 *)vrh, len);
 	kfree(vrh);
 	return aq_ret;
 }
 
 /**
- * i40e_vc_set_rss_hena
+ * i40e_vc_set_rss_hashcfg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
 *
- * Set the RSS HENA bits for the VF
+ * Set the RSS Hash configuration bits for the VF
 **/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
 {
-	struct virtchnl_rss_hena *vrh =
-		(struct virtchnl_rss_hena *)msg;
+	struct virtchnl_rss_hashcfg *vrh =
+		(struct virtchnl_rss_hashcfg *)msg;
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_hw *hw = &pf->hw;
 	int aq_ret = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
-	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id),
+			  (u32)vrh->hashcfg);
 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
-			  (u32)(vrh->hena >> 32));
+			  (u32)(vrh->hashcfg >> 32));
 
 	/* send the response to the VF */
 err:
-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret);
 }
 
 /**
@@ -3460,7 +3543,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
 	int aq_ret = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3486,7 +3569,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
 	int aq_ret = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3518,16 +3601,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
 	bool found = false;
 	int bkt;
 
-	if (!tc_filter->action) {
+	if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
 		dev_info(&pf->pdev->dev,
-			 "VF %d: Currently ADq doesn't support Drop Action\n",
-			 vf->vf_id);
+			 "VF %d: ADQ doesn't support this action (%d)\n",
+			 vf->vf_id, tc_filter->action);
 		goto err;
 	}
 
 	/* action_meta is TC number here to which the filter is applied */
 	if (!tc_filter->action_meta ||
-	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
+	    tc_filter->action_meta >= vf->num_tc) {
 		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
 			 vf->vf_id, tc_filter->action_meta);
 		goto err;
@@ -3574,7 +3657,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
 			dev_err(&pf->pdev->dev,
 				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
 				vf->vf_id);
-			return I40E_ERR_CONFIG;
+			return -EIO;
 		}
 	}
 
@@ -3627,9 +3710,9 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
 		}
 	}
 
-	return I40E_SUCCESS;
+	return 0;
 err:
-	return I40E_ERR_CONFIG;
+	return -EIO;
 }
 
 /**
@@ -3684,8 +3767,7 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
 			dev_err(&pf->pdev->dev,
 				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
 				vf->vf_id, ERR_PTR(ret),
-				i40e_aq_str(&pf->hw,
-					    pf->hw.aq.asq_last_status));
+				libie_aq_str(pf->hw.aq.asq_last_status));
 
 		hlist_del(&cfilter->cloud_node);
 		kfree(cfilter);
@@ -3713,7 +3795,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
 	int i, ret;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3721,7 +3803,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		dev_info(&pf->pdev->dev,
 			 "VF %d: ADq not enabled, can't apply cloud filter\n",
 			 vf->vf_id);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3729,7 +3811,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		dev_info(&pf->pdev->dev,
 			 "VF %d: Invalid input, can't apply cloud filter\n",
 			 vf->vf_id);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3787,7 +3869,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		dev_err(&pf->pdev->dev,
 			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
 			vf->vf_id, ERR_PTR(ret),
-			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+			libie_aq_str(pf->hw.aq.asq_last_status));
 		goto err;
 	}
 
@@ -3825,6 +3907,8 @@ err:
 				       aq_ret);
 }
 
+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
 /**
  * i40e_vc_add_cloud_filter
  * @vf: pointer to the VF info
@@ -3841,10 +3925,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi = NULL;
 	int aq_ret = 0;
-	int i, ret;
+	int i;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err_out;
 	}
 
@@ -3852,7 +3936,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		dev_info(&pf->pdev->dev,
 			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
 			 vf->vf_id);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err_out;
 	}
 
@@ -3860,13 +3944,23 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		dev_info(&pf->pdev->dev,
 			 "VF %d: Invalid input/s, can't apply cloud filter\n",
 			 vf->vf_id);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
+		goto err_out;
+	}
+
+	if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+		dev_warn(&pf->pdev->dev,
+			 "VF %d: Max number of filters reached, can't apply cloud filter\n",
+			 vf->vf_id);
+		aq_ret = -ENOSPC;
 		goto err_out;
 	}
 
 	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
-	if (!cfilter)
-		return -ENOMEM;
+	if (!cfilter) {
+		aq_ret = -ENOMEM;
+		goto err_out;
+	}
 
 	/* parse destination mac address */
 	for (i = 0; i < ETH_ALEN; i++)
@@ -3914,14 +4008,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
 
 	/* Adding cloud filter programmed as TC filter */
 	if (tcf.dst_port)
-		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+		aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
 	else
-		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
-	if (ret) {
+		aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+	if (aq_ret) {
 		dev_err(&pf->pdev->dev,
 			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
-			vf->vf_id, ERR_PTR(ret),
-			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+			vf->vf_id, ERR_PTR(aq_ret),
+			libie_aq_str(pf->hw.aq.asq_last_status));
 		goto err_free;
 	}
 
@@ -3953,7 +4047,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 	u64 speed = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
		goto err;
 	}
 
@@ -3961,7 +4055,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 	if (vf->spoofchk) {
 		dev_err(&pf->pdev->dev,
 			"Spoof check is ON, turn it OFF to enable ADq\n");
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3969,7 +4063,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 		dev_err(&pf->pdev->dev,
 			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
 			vf->vf_id);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3978,7 +4072,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 		dev_err(&pf->pdev->dev,
 			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
 			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -3990,7 +4084,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
 				vf->vf_id, i, tci->list[i].count,
 				I40E_DEFAULT_QUEUES_PER_VF);
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto err;
 		}
 
@@ -4001,7 +4095,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 		dev_err(&pf->pdev->dev,
 			"No queues left to allocate to VF %d\n",
 			vf->vf_id);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	} else {
 		/* we need to allocate max VF queues to enable ADq so as to
@@ -4016,7 +4110,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 	if (speed == SPEED_UNKNOWN) {
 		dev_err(&pf->pdev->dev,
 			"Cannot detect link speed\n");
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -4029,7 +4123,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 				"Invalid max tx rate %llu specified for VF %d.",
 				tci->list[i].max_tx_rate,
 				vf->vf_id);
-			aq_ret = I40E_ERR_PARAM;
+			aq_ret = -EINVAL;
 			goto err;
 		} else {
 			vf->ch[i].max_tx_rate =
@@ -4045,7 +4139,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 	/* reset the VF in order to allocate resources */
 	i40e_vc_reset_vf(vf, true);
 
-	return I40E_SUCCESS;
+	return 0;
 
 	/* send the response to the VF */
 err:
@@ -4064,7 +4158,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
 	int aq_ret = 0;
 
 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 		goto err;
 	}
 
@@ -4079,13 +4173,13 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
 	} else {
 		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
 			 vf->vf_id);
-		aq_ret = I40E_ERR_PARAM;
+		aq_ret = -EINVAL;
 	}
 
 	/* reset the VF in order to allocate resources */
 	i40e_vc_reset_vf(vf, true);
 
-	return I40E_SUCCESS;
+	return 0;
 
 err:
 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
@@ -4119,21 +4213,16 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 
 	/* Check if VF is disabled. */
 	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	/* perform basic checks on the msg */
 	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
 
 	if (ret) {
-		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
 		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
 			local_vf_id, v_opcode, msglen);
-		switch (ret) {
-		case VIRTCHNL_STATUS_ERR_PARAM:
-			return -EPERM;
-		default:
-			return -EINVAL;
-		}
+		return ret;
 	}
 
 	switch (v_opcode) {
@@ -4194,11 +4283,11 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
 		ret = i40e_vc_config_rss_lut(vf, msg);
 		break;
-	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
-		ret = i40e_vc_get_rss_hena(vf, msg);
+	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+		ret = i40e_vc_get_rss_hashcfg(vf, msg);
 		break;
-	case VIRTCHNL_OP_SET_RSS_HENA:
-		ret = i40e_vc_set_rss_hena(vf, msg);
+	case VIRTCHNL_OP_SET_RSS_HASHCFG:
+		ret = i40e_vc_set_rss_hashcfg(vf, msg);
 		break;
 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
 		ret = i40e_vc_enable_vlan_stripping(vf, msg);
@@ -4226,7 +4315,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
 			v_opcode, local_vf_id);
 		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
-					      I40E_ERR_NOT_IMPLEMENTED);
+					      -EOPNOTSUPP);
 		break;
 	}
 
@@ -4269,7 +4358,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
 		if (reg & BIT(bit_idx))
 			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
-			i40e_reset_vf(vf, true);
+			if (!i40e_reset_vf(vf, true)) {
+				/* At least one VF did not finish resetting, retry next time */
+				set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+			}
 	}
 
 	return 0;
@@ -4305,6 +4397,38 @@ err_out:
 }
 
 /**
+ * i40e_check_vf_init_timeout
+ * @vf: the virtual function
+ *
+ * Check that the VF's initialization was successfully done and if not
+ * wait up to 300ms for its finish.
+ *
+ * Returns true when VF is initialized, false on timeout
+ **/
+static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
+{
+	int i;
+
+	/* When the VF is resetting wait until it is done.
+	 * It can take up to 200 milliseconds, but wait for
+	 * up to 300 milliseconds to be safe.
+	 */
+	for (i = 0; i < 15; i++) {
+		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+			return true;
+		msleep(20);
+	}
+
+	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+		dev_err(&vf->pf->pdev->dev,
+			"VF %d still in reset. Try again.\n", vf->vf_id);
+		return false;
+	}
+
+	return true;
+}
+
+/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
@@ -4322,7 +4446,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	int ret = 0;
 	struct hlist_node *h;
 	int bkt;
-	u8 i;
 
 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
@@ -4335,21 +4458,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 		goto error_param;
 
 	vf = &pf->vf[vf_id];
-
-	/* When the VF is resetting wait until it is done.
-	 * It can take up to 200 milliseconds,
-	 * but wait for up to 300 milliseconds to be safe.
-	 * Acquire the VSI pointer only after the VF has been
-	 * properly initialized.
	 */
-	for (i = 0; i < 15; i++) {
-		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
-			break;
-		msleep(20);
-	}
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
+	if (!i40e_check_vf_init_timeout(vf)) {
 		ret = -EAGAIN;
 		goto error_param;
 	}
@@ -4451,22 +4560,18 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 	}
 
 	vf = &pf->vf[vf_id];
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
+	if (!i40e_check_vf_init_timeout(vf)) {
 		ret = -EAGAIN;
 		goto error_pvid;
 	}
+	vsi = pf->vsi[vf->lan_vsi_idx];
 
 	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
 		/* duplicate request, so just return success */
 		goto error_pvid;
 
 	i40e_vlan_stripping_enable(vsi);
-	i40e_vc_reset_vf(vf, true);
-	/* During reset the VF got a new VSI, so refresh a pointer. */
-	vsi = pf->vsi[vf->lan_vsi_idx];
+
 	/* Locked once because multiple functions below iterate list */
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 
@@ -4552,6 +4657,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
	 */
 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
 
+	i40e_vc_reset_vf(vf, true);
+	/* During reset the VF got a new VSI, so refresh a pointer. */
+	vsi = pf->vsi[vf->lan_vsi_idx];
+
 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
 	if (ret) {
 		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
@@ -4601,13 +4710,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 	}
 
 	vf = &pf->vf[vf_id];
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
+	if (!i40e_check_vf_init_timeout(vf)) {
 		ret = -EAGAIN;
 		goto error;
 	}
+	vsi = pf->vsi[vf->lan_vsi_idx];
 
 	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
 	if (ret)
@@ -4660,9 +4767,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 	ivi->max_tx_rate = vf->tx_rate;
 	ivi->min_tx_rate = 0;
-	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
-	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
-		   I40E_VLAN_PRIORITY_SHIFT;
+	ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
+	ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
 	if (vf->link_forced == false)
 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 	else if (vf->link_up == true)
@@ -4693,9 +4799,13 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
 	struct virtchnl_pf_event pfe;
 	struct i40e_hw *hw = &pf->hw;
+	struct i40e_vsi *vsi;
+	unsigned long q_map;
 	struct i40e_vf *vf;
 	int abs_vf_id;
+	int old_link;
 	int ret = 0;
+	int tmp;
 
 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
@@ -4712,23 +4822,55 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 	vf = &pf->vf[vf_id];
 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
+	/* skip VF link state change if requested state is already set */
+	if (!vf->link_forced)
+		old_link = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->link_up)
+		old_link = IFLA_VF_LINK_STATE_ENABLE;
+	else
+		old_link = IFLA_VF_LINK_STATE_DISABLE;
+
+	if (link == old_link)
+		goto error_out;
+
 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 	pfe.severity = PF_EVENT_SEVERITY_INFO;
 
 	switch (link) {
 	case IFLA_VF_LINK_STATE_AUTO:
 		vf->link_forced = false;
+		vf->is_disabled_from_host = false;
+		/* reset needed to reinit VF resources */
+		i40e_vc_reset_vf(vf, true);
 		i40e_set_vf_link_state(vf, &pfe, ls);
 		break;
 	case IFLA_VF_LINK_STATE_ENABLE:
 		vf->link_forced = true;
 		vf->link_up = true;
+		vf->is_disabled_from_host = false;
+		/* reset needed to reinit VF resources */
+		i40e_vc_reset_vf(vf, true);
 		i40e_set_vf_link_state(vf, &pfe, ls);
 		break;
 	case IFLA_VF_LINK_STATE_DISABLE:
 		vf->link_forced = true;
 		vf->link_up = false;
 		i40e_set_vf_link_state(vf, &pfe, ls);
+
+		vsi = pf->vsi[vf->lan_vsi_idx];
+		q_map = BIT(vsi->num_queue_pairs) - 1;
+
+		vf->is_disabled_from_host = true;
+
+		/* Try to stop both Tx&Rx rings even if one of the calls fails
+		 * to ensure we stop the rings even in case of errors.
+		 * If any of them returns with an error then the first
+		 * error that occurred will be returned.
		 */
+		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
+		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
+
+		ret = tmp ? tmp : ret;
 		break;
 	default:
 		ret = -EINVAL;
@@ -4774,9 +4916,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
 	}
 
 	vf = &(pf->vf[vf_id]);
-	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
-			vf_id);
+	if (!i40e_check_vf_init_timeout(vf)) {
 		ret = -EAGAIN;
 		goto out;
 	}
@@ -4830,7 +4970,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
 		goto out;
 	}
 
-	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+	if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
 		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
 		ret = -EINVAL;
 		goto out;
	}
@@ -4907,8 +5047,8 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
 	vf_stats->tx_bytes   = stats->tx_bytes;
 	vf_stats->broadcast  = stats->rx_broadcast;
 	vf_stats->multicast  = stats->rx_multicast;
-	vf_stats->rx_dropped = stats->rx_discards;
-	vf_stats->tx_dropped = stats->tx_discards;
+	vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
+	vf_stats->tx_dropped = stats->tx_errors;
 
 	return 0;
 }

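Several hunks above convert open-coded shift-and-mask register arithmetic to FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which derive the shift from the mask at compile time and type-check the value. A minimal sketch of the idiom, using an illustrative mask rather than a real i40e register field:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_ITR_INDX_MASK	GENMASK(12, 11)	/* illustrative field, not a real i40e mask */

static u32 demo_pack_itr(u32 reg, u32 itr_idx)
{
	/* old style: reg | ((itr_idx << DEMO_ITR_INDX_SHIFT) & DEMO_ITR_INDX_MASK) */
	return reg | FIELD_PREP(DEMO_ITR_INDX_MASK, itr_idx);
}

static u32 demo_unpack_itr(u32 reg)
{
	/* old style: (reg & DEMO_ITR_INDX_MASK) >> DEMO_ITR_INDX_SHIFT */
	return FIELD_GET(DEMO_ITR_INDX_MASK, reg);
}

The same header also provides le16_get_bits(), which the ndo_get_vf_config hunk uses to pull the VLAN id and QoS priority straight out of the little-endian vsi->info.pvid without an explicit byte swap and shift.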
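The qvlist and vfres allocations switch from struct_size() with a "num - 1" element count to virtchnl_struct_size() with the true count, following virtchnl's move from fake one-element trailing arrays to C99 flexible array members. A sketch of the flexible-array sizing pattern, with an illustrative structure rather than the actual virtchnl layout:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_qvlist {				/* illustrative stand-in */
	u32 num_vectors;
	struct {
		u16 v_idx;
		u16 ceq_idx;
	} qv_info[];				/* flexible array member */
};

static struct demo_qvlist *demo_qvlist_alloc(u32 num_vectors)
{
	struct demo_qvlist *q;

	/* header + num_vectors elements, with overflow checking;
	 * virtchnl_struct_size() plays the same role for virtchnl
	 * structures, with no "- 1" adjustment needed.
	 */
	q = kzalloc(struct_size(q, qv_info, num_vectors), GFP_KERNEL);
	if (q)
		q->num_vectors = num_vectors;
	return q;
}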
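The PF flag tests likewise move from bit masks on a scalar pf->flags (I40E_FLAG_VEB_MODE_ENABLED, I40E_FLAG_MFP_ENABLED) to bitmap helpers on renamed bit numbers (I40E_FLAG_VEB_MODE_ENA, I40E_FLAG_MFP_ENA), which scale past 64 flags and update each bit atomically. A minimal sketch of the bitmap-flag pattern, with illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

enum demo_pf_flags {				/* illustrative flag bits */
	DEMO_FLAG_VEB_MODE_ENA,
	DEMO_FLAG_MFP_ENA,
	DEMO_PF_FLAGS_NBITS,
};

struct demo_pf {
	DECLARE_BITMAP(flags, DEMO_PF_FLAGS_NBITS);
};

static void demo_set_veb_mode(struct demo_pf *pf, bool ena)
{
	/* old style: pf->flags |= MASK / pf->flags &= ~MASK */
	if (ena)
		set_bit(DEMO_FLAG_VEB_MODE_ENA, pf->flags);
	else
		clear_bit(DEMO_FLAG_VEB_MODE_ENA, pf->flags);
}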
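i40e_config_vsi_tx_queue() and i40e_config_vsi_rx_queue() now reject a VF-supplied ring_len that is misaligned (a multiple of 8 for Tx, 32 for Rx) or larger than the device maximum before programming the HMC queue context. A sketch of that check; the descriptor cap value below is an assumption for illustration, not taken from the driver headers:

#include <linux/align.h>
#include <linux/errno.h>

#define DEMO_MAX_NUM_DESCRIPTORS	8160	/* assumed XL710-class cap */

static int demo_validate_ring_len(u16 ring_len, unsigned int align)
{
	/* align is 8 for Tx rings and 32 for Rx rings */
	if (!IS_ALIGNED(ring_len, align) ||
	    ring_len > DEMO_MAX_NUM_DESCRIPTORS)
		return -EINVAL;
	return 0;
}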
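i40e_reset_all_vfs() drops the v index and walks the VF array with a pointer cursor, so the trigger, poll, ring-stop, and cleanup passes all test and operate on the same vf. The shape of the conversion, with illustrative types:

struct demo_vf {
	unsigned long vf_states;
};

struct demo_pf_iov {
	int num_alloc_vfs;
	struct demo_vf *vf;		/* array of num_alloc_vfs entries */
};

static void demo_for_each_vf(struct demo_pf_iov *pf)
{
	struct demo_vf *vf;

	/* was: for (v = 0; v < pf->num_alloc_vfs; v++) { vf = &pf->vf[v]; ... } */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf)
		; /* per-VF work goes here */
}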