Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_sriov.c')
 drivers/net/ethernet/intel/ice/ice_sriov.c | 482
 1 file changed, 285 insertions(+), 197 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 31314e7540f8..6b1126ddb561 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -9,7 +9,7 @@
 #include "ice_dcb_lib.h"
 #include "ice_flow.h"
 #include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
 #include "ice_flex_pipe.h"
 #include "ice_vf_vsi_vlan_ops.h"
 #include "ice_vlan.h"
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
 	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
 		hash_del_rcu(&vf->entry);
+		ice_deinitialize_vf_entry(vf);
 		ice_put_vf(vf);
 	}
 }
@@ -62,9 +63,10 @@ static void ice_free_vf_res(struct ice_vf *vf)
 	if (vf->lan_vsi_idx != ICE_NO_VSI) {
 		ice_vf_vsi_release(vf);
 		vf->num_mac = 0;
+		vf->num_mac_lldp = 0;
 	}
 
-	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+	last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
 
 	/* clear VF MDD event information */
 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
@@ -102,14 +104,12 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
 
 	first = vf->first_vector_idx;
-	last = first + pf->vfs.num_msix_per - 1;
+	last = first + vf->num_msix - 1;
 	for (v = first; v <= last; v++) {
 		u32 reg;
 
-		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
-			GLINT_VECT2FUNC_IS_PF_M) |
-		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
-			GLINT_VECT2FUNC_PF_NUM_M));
+		reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
+		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
 		wr32(hw, GLINT_VECT2FUNC(v), reg);
 	}
 
@@ -125,25 +125,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
 }
 
 /**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
-	if (!pf)
-		return -EINVAL;
-
-	pf->sriov_base_vector = 0;
-
-	return 0;
-}
-
-/**
  * ice_free_vfs - Free all VFs
  * @pf: pointer to the PF structure
  */
@@ -172,12 +153,12 @@ void ice_free_vfs(struct ice_pf *pf)
 
 	mutex_lock(&vfs->table_lock);
 
-	ice_eswitch_release(pf);
-
 	ice_for_each_vf(pf, bkt, vf) {
 		mutex_lock(&vf->cfg_lock);
 
+		ice_eswitch_detach_vf(pf, vf);
 		ice_dis_vf_qs(vf);
+		ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
 
 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
 			/* disable VF qp mappings and set VF disable state */
@@ -194,15 +175,9 @@ void ice_free_vfs(struct ice_pf *pf)
 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 		}
 
-		/* clear malicious info since the VF is getting released */
-		list_del(&vf->mbx_info.list_entry);
-
 		mutex_unlock(&vf->cfg_lock);
 	}
 
-	if (ice_sriov_free_msix_res(pf))
-		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
 	vfs->num_qps_per = 0;
 	ice_free_vf_entries(pf);
@@ -226,7 +201,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
 	struct ice_vsi *vsi;
 
 	params.type = ICE_VSI_VF;
-	params.pi = ice_vf_get_port_info(vf);
+	params.port_info = ice_vf_get_port_info(vf);
 	params.vf = vf;
 	params.flags = ICE_VSI_FLAG_INIT;
@@ -239,27 +214,10 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
 	}
 
 	vf->lan_vsi_idx = vsi->idx;
-	vf->lan_vsi_num = vsi->vsi_num;
 
 	return vsi;
 }
 
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration will have to increment by
- * 1 to avoid meddling with the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
-	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
-}
-
 /**
  * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
@@ -280,32 +238,28 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
 	hw = &pf->hw;
 	pf_based_first_msix = vf->first_vector_idx;
-	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+	pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1;
 
 	device_based_first_msix = pf_based_first_msix +
 		pf->hw.func_caps.common_cap.msix_vector_first_id;
 	device_based_last_msix =
-		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
+		(device_based_first_msix + vf->num_msix) - 1;
 	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
-	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
-		VPINT_ALLOC_FIRST_M) |
-	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
-		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+	reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
+	      FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
+	      VPINT_ALLOC_VALID_M;
 	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
 
-	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
-		& VPINT_ALLOC_PCI_FIRST_M) |
-	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
-		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+	reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
+	      FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
+	      VPINT_ALLOC_PCI_VALID_M;
 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
 
 	/* map the interrupts to its functions */
 	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
-		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
-			GLINT_VECT2FUNC_VF_NUM_M) |
-		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
-			GLINT_VECT2FUNC_PF_NUM_M));
+		reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
+		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
 		wr32(hw, GLINT_VECT2FUNC(v), reg);
 	}
 
@@ -338,10 +292,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
 		 * VFNUMQ value should be set to (number of queues - 1). A value
 		 * of 0 means 1 queue and a value of 255 means 256 queues
 		 */
-		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
-			VPLAN_TX_QBASE_VFFIRSTQ_M) |
-		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
-			VPLAN_TX_QBASE_VFNUMQ_M));
+		reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
+		      FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
 	} else {
 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
@@ -356,10 +308,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
 		 * VFNUMQ value should be set to (number of queues - 1). A value
 		 * of 0 means 1 queue and a value of 255 means 256 queues
 		 */
-		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
-			VPLAN_RX_QBASE_VFFIRSTQ_M) |
-		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
-			VPLAN_RX_QBASE_VFNUMQ_M));
+		reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
+		      FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
 	} else {
 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
@@ -386,52 +336,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
  * @vf: VF to calculate the register index for
  * @q_vector: a q_vector associated to the VF
  */
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
 {
-	struct ice_pf *pf;
-
 	if (!vf || !q_vector)
-		return -EINVAL;
-
-	pf = vf->pf;
+		return;
 
 	/* always add one to account for the OICR being the first MSIX */
-	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
-		q_vector->v_idx + 1;
-}
-
-/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
-	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
-	int vectors_used = ice_get_max_used_msix_vector(pf);
-	int sriov_base_vector;
-
-	sriov_base_vector = total_vectors - num_msix_needed;
-
-	/* make sure we only grab irq_tracker entries from the list end and
-	 * that we have enough available MSIX vectors
-	 */
-	if (sriov_base_vector < vectors_used)
-		return -EINVAL;
-
-	pf->sriov_base_vector = sriov_base_vector;
-
-	return 0;
+	q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
+	q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
 }
 
 /**
@@ -458,11 +370,9 @@
  */
 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
 {
-	int vectors_used = ice_get_max_used_msix_vector(pf);
 	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
 	int msix_avail_per_vf, msix_avail_for_sriov;
 	struct device *dev = ice_pf_to_dev(pf);
-	int err;
 
 	lockdep_assert_held(&pf->vfs.table_lock);
@@ -470,8 +380,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
 		return -EINVAL;
 
 	/* determine MSI-X resources per VF */
-	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
-		vectors_used;
+	msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
 	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
 	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -510,13 +419,6 @@
 		return -ENOSPC;
 	}
 
-	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
-	if (err) {
-		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
-			num_vfs, err);
-		return err;
-	}
-
 	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
 	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
 	pf->vfs.num_msix_per = num_msix_per_vf;
@@ -539,7 +441,9 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
 	struct ice_vsi *vsi;
 	int err;
 
-	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
+	if (vf->first_vector_idx < 0)
+		return -ENOMEM;
 
 	vsi = ice_vf_vsi_setup(vf);
 	if (!vsi)
@@ -580,6 +484,14 @@ static int ice_start_vfs(struct ice_pf *pf)
 			goto teardown;
 		}
 
+		retval = ice_eswitch_attach_vf(pf, vf);
+		if (retval) {
+			dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
+				vf->vf_id, retval);
+			ice_vf_vsi_release(vf);
+			goto teardown;
+		}
+
 		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
 		ice_ena_vf_mappings(vf);
 		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
@@ -734,24 +646,6 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
 }
 
 /**
- * ice_sriov_create_vsi - Create a new VSI for a VF
- * @vf: VF to create the VSI for
- *
- * This is called by ice_vf_recreate_vsi to create the new VSI after the old
- * VSI has been released.
- */
-static int ice_sriov_create_vsi(struct ice_vf *vf)
-{
-	struct ice_vsi *vsi;
-
-	vsi = ice_vf_vsi_setup(vf);
-	if (!vsi)
-		return -ENOMEM;
-
-	return 0;
-}
-
-/**
  * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
  * @vf: VF to perform tasks on
  */
@@ -770,7 +664,6 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
 	.poll_reset_status = ice_sriov_poll_reset_status,
 	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
 	.irq_close = NULL,
-	.create_vsi = ice_sriov_create_vsi,
 	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
 };
@@ -789,14 +682,19 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
  */
 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
 {
+	struct pci_dev *pdev = pf->pdev;
 	struct ice_vfs *vfs = &pf->vfs;
+	struct pci_dev *vfdev = NULL;
 	struct ice_vf *vf;
-	u16 vf_id;
-	int err;
+	u16 vf_pdev_id;
+	int err, pos;
 
 	lockdep_assert_held(&vfs->table_lock);
 
-	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);
+
+	for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
 		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
 		if (!vf) {
 			err = -ENOMEM;
@@ -812,11 +710,23 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
 
 		ice_initialize_vf_entry(vf);
 
+		do {
+			vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
+		} while (vfdev && vfdev->physfn != pdev);
+		vf->vfdev = vfdev;
 		vf->vf_sw_id = pf->first_sw;
 
+		pci_dev_get(vfdev);
+
 		hash_add_rcu(vfs->table, &vf->entry, vf_id);
 	}
 
+	/* Decrement of refcount done by pci_get_device() inside the loop does
+	 * not touch the last iteration's vfdev, so it has to be done manually
+	 * to balance pci_dev_get() added within the loop.
+	 */
+	pci_dev_put(vfdev);
+
 	return 0;
 
 err_free_entries:
@@ -870,12 +780,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
 
 	clear_bit(ICE_VF_DIS, pf->state);
 
-	ret = ice_eswitch_configure(pf);
-	if (ret) {
-		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
-		goto err_unroll_sriov;
-	}
-
 	/* rearm global interrupts */
 	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
 		ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -957,6 +861,173 @@ static int ice_check_sriov_allowed(struct ice_pf *pf)
 }
 
 /**
+ * ice_sriov_get_vf_total_msix - return number of MSI-X used by VFs
+ * @pdev: pointer to pci_dev struct
+ *
+ * The function is called via sysfs ops
+ */
+u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
+{
+	struct ice_pf *pf = pci_get_drvdata(pdev);
+
+	return pf->virt_irq_tracker.num_entries;
+}
+
+static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
+{
+	u16 vf_ids[ICE_MAX_SRIOV_VFS];
+	struct ice_vf *tmp_vf;
+	int to_remap = 0, bkt;
+
+	/* For better irqs usage try to remap irqs of VFs
+	 * that aren't running yet
+	 */
+	ice_for_each_vf(pf, bkt, tmp_vf) {
+		/* skip VF which is changing the number of MSI-X */
+		if (restricted_id == tmp_vf->vf_id ||
+		    test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states))
+			continue;
+
+		ice_dis_vf_mappings(tmp_vf);
+		ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+				   tmp_vf->num_msix);
+
+		vf_ids[to_remap] = tmp_vf->vf_id;
+		to_remap += 1;
+	}
+
+	for (int i = 0; i < to_remap; i++) {
+		tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
+		if (!tmp_vf)
+			continue;
+
+		tmp_vf->first_vector_idx =
+			ice_virt_get_irqs(pf, tmp_vf->num_msix);
+		/* there is no need to rebuild VSI as we are only changing the
+		 * vector indexes not amount of MSI-X or queues
+		 */
+		ice_ena_vf_mappings(tmp_vf);
+		ice_put_vf(tmp_vf);
+	}
+}
+
+/**
+ * ice_sriov_set_msix_vec_count
+ * @vf_dev: pointer to pci_dev struct of VF device
+ * @msix_vec_count: new value for MSI-X amount on this VF
+ *
+ * Set requested MSI-X, queues and registers for @vf_dev.
+ *
+ * First do some sanity checks like if there are any VFs, if the new value
+ * is correct etc. Then disable old mapping (MSI-X and queues registers), change
+ * MSI-X and queues, rebuild VSI and enable new mapping.
+ *
+ * If it is possible (driver not binded to VF) try to remap also other VFs to
+ * linearize irqs register usage.
+ */
+int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
+{
+	struct pci_dev *pdev = pci_physfn(vf_dev);
+	struct ice_pf *pf = pci_get_drvdata(pdev);
+	u16 prev_msix, prev_queues, queues;
+	bool needs_rebuild = false;
+	struct ice_vsi *vsi;
+	struct ice_vf *vf;
+
+	if (!ice_get_num_vfs(pf))
+		return -ENOENT;
+
+	if (!msix_vec_count)
+		return 0;
+
+	queues = msix_vec_count;
+	/* add 1 MSI-X for OICR */
+	msix_vec_count += 1;
+
+	if (queues > min(ice_get_avail_txq_count(pf),
+			 ice_get_avail_rxq_count(pf)))
+		return -EINVAL;
+
+	if (msix_vec_count < ICE_MIN_INTR_PER_VF)
+		return -EINVAL;
+
+	vf = ice_get_vf_by_dev(pf, vf_dev);
+	if (!vf)
+		return -ENOENT;
+
+	vsi = ice_get_vf_vsi(vf);
+	if (!vsi) {
+		ice_put_vf(vf);
+		return -ENOENT;
+	}
+
+	/* No need to rebuild if we're setting to the same value */
+	if (msix_vec_count == vf->num_msix) {
+		ice_put_vf(vf);
+		return 0;
+	}
+
+	prev_msix = vf->num_msix;
+	prev_queues = vf->num_vf_qs;
+
+	ice_dis_vf_mappings(vf);
+	ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
+
+	/* Remap all VFs beside the one is now configured */
+	ice_sriov_remap_vectors(pf, vf->vf_id);
+
+	vf->num_msix = msix_vec_count;
+	vf->num_vf_qs = queues;
+	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
+	if (vf->first_vector_idx < 0)
+		goto unroll;
+
+	vsi->req_txq = queues;
+	vsi->req_rxq = queues;
+
+	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
+		/* Try to rebuild with previous values */
+		needs_rebuild = true;
+		goto unroll;
+	}
+
+	dev_info(ice_pf_to_dev(pf),
+		 "Changing VF %d resources to %d vectors and %d queues\n",
+		 vf->vf_id, vf->num_msix, vf->num_vf_qs);
+
+	ice_ena_vf_mappings(vf);
+	ice_put_vf(vf);
+
+	return 0;
+
+unroll:
+	dev_info(ice_pf_to_dev(pf),
+		 "Can't set %d vectors on VF %d, falling back to %d\n",
+		 vf->num_msix, vf->vf_id, prev_msix);
+
+	vf->num_msix = prev_msix;
+	vf->num_vf_qs = prev_queues;
+
+	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
+	if (vf->first_vector_idx < 0) {
+		ice_put_vf(vf);
+		return -EINVAL;
+	}
+
+	if (needs_rebuild) {
+		vsi->req_txq = prev_queues;
+		vsi->req_rxq = prev_queues;
+
+		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+	}
+
+	ice_ena_vf_mappings(vf);
+	ice_put_vf(vf);
+
+	return -EINVAL;
+}
+
+/**
  * ice_sriov_configure - Enable or change number of VFs via sysfs
  * @pdev: pointer to a pci_dev structure
  * @num_vfs: number of VFs to allocate or 0 to free VFs
@@ -1090,15 +1161,16 @@ static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
 void
 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
 {
+	struct ice_aqc_event_lan_overflow *cmd;
 	u32 gldcb_rtctq, queue;
 	struct ice_vf *vf;
 
-	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+	cmd = libie_aq_raw(&event->desc);
+	gldcb_rtctq = le32_to_cpu(cmd->prtdcb_ruptq);
 	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
 
 	/* event returns device global Rx queue number */
-	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
-		GLDCB_RTCTQ_RXQNUM_S;
+	queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);
 
 	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
 	if (!vf)
@@ -1118,8 +1190,7 @@
  */
 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
 {
-	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_pf *pf = np->vsi->back;
+	struct ice_pf *pf = ice_netdev_to_pf(netdev);
 	struct ice_vsi *vf_vsi;
 	struct device *dev;
 	struct ice_vf *vf;
@@ -1217,21 +1288,23 @@ out_put_vf:
 }
 
 /**
- * ice_set_vf_mac
- * @netdev: network interface device structure
+ * __ice_set_vf_mac - program VF MAC address
+ * @pf: PF to be configure
  * @vf_id: VF identifier
  * @mac: MAC address
 *
 * program VF MAC address
+ * Return: zero on success or an error code on failure
 */
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
 {
-	struct ice_pf *pf = ice_netdev_to_pf(netdev);
+	struct device *dev;
 	struct ice_vf *vf;
 	int ret;
 
+	dev = ice_pf_to_dev(pf);
 	if (is_multicast_ether_addr(mac)) {
-		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+		dev_err(dev, "%pM not a valid unicast address\n", mac);
 		return -EINVAL;
 	}
@@ -1260,13 +1333,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	if (is_zero_ether_addr(mac)) {
 		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
 		vf->pf_set_mac = false;
-		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
-			    vf->vf_id);
+		dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+			 vf->vf_id);
 	} else {
 		/* PF will add MAC rule for the VF */
 		vf->pf_set_mac = true;
-		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
-			    mac, vf_id);
+		dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+			 mac, vf_id);
 	}
 
 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
@@ -1278,6 +1351,20 @@ out_put_vf:
 }
 
 /**
+ * ice_set_vf_mac - .ndo_set_vf_mac handler
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ * Return: zero on success or an error code on failure
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+	return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
+}
+
+/**
  * ice_set_vf_trust
  * @netdev: network interface device structure
  * @vf_id: VF identifier
@@ -1312,6 +1399,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
 
 	mutex_lock(&vf->cfg_lock);
 
+	while (!trusted && vf->num_mac_lldp)
+		ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);
+
 	vf->trusted = trusted;
 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
 	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
@@ -1663,6 +1753,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
 }
 
 /**
+ * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
+{
+	struct ice_pf *pf = vf->pf;
+	struct device *dev;
+
+	dev = ice_pf_to_dev(pf);
+
+	dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+		 vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
+		 vf->dev_lan_addr,
+		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+		 ? "on" : "off");
"on" : "off"); +} + +/** * ice_print_vfs_mdd_events - print VFs malicious driver detect event * @pf: pointer to the PF structure * @@ -1670,8 +1778,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) */ void ice_print_vfs_mdd_events(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; struct ice_vf *vf; unsigned int bkt; @@ -1698,10 +1804,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count; - - dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", - vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, - vf->dev_lan_addr); + ice_print_vf_tx_mdd_event(vf); } } mutex_unlock(&pf->vfs.table_lock); @@ -1709,31 +1812,16 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) /** * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR - * @pdev: pointer to a pci_dev structure + * @pf: pointer to the PF structure * * Called when recovering from a PF FLR to restore interrupt capability to * the VFs. */ -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) +void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { - u16 vf_id; - int pos; - - if (!pci_num_vf(pdev)) - return; + struct ice_vf *vf; + u32 bkt; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (pos) { - struct pci_dev *vfdev; - - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, - &vf_id); - vfdev = pci_get_device(pdev->vendor, vf_id, NULL); - while (vfdev) { - if (vfdev->is_virtfn && vfdev->physfn == pdev) - pci_restore_msi_state(vfdev); - vfdev = pci_get_device(pdev->vendor, vf_id, - vfdev); - } - } + ice_for_each_vf(pf, bkt, vf) + pci_restore_msi_state(vf->vfdev); } |
