Diffstat (limited to 'drivers/net/ethernet/intel')
47 files changed, 2196 insertions, 394 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 3cd13fd55011..5aa86318ed3e 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -295,6 +295,7 @@ config ICE default n depends on PCI_MSI select NET_DEVLINK + select PLDMFW help This driver supports Intel(R) Ethernet Connection E800 Series of devices. For more information on how to identify your adapter, go diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 91c64f91a835..36da059388dc 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2993,8 +2993,6 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) e100_down(nic); netif_device_detach(netdev); - pci_save_state(pdev); - if ((nic->flags & wol_magic) | e100_asf(nic)) { /* enable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { @@ -3024,24 +3022,22 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake) return 0; } -#ifdef CONFIG_PM -static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused e100_suspend(struct device *dev_d) { bool wake; - __e100_shutdown(pdev, &wake); - return __e100_power_off(pdev, wake); + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + device_wakeup_disable(dev_d); + + return 0; } -static int e100_resume(struct pci_dev *pdev) +static int __maybe_unused e100_resume(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct nic *nic = netdev_priv(netdev); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, PCI_D0, 0); - /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, @@ -3058,7 +3054,6 @@ static int e100_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ static void e100_shutdown(struct pci_dev *pdev) { @@ -3146,16 +3141,17 @@ static const struct pci_error_handlers e100_err_handler = { .resume = e100_io_resume, }; +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + static struct pci_driver e100_driver = { .name = DRV_NAME, .id_table = e100_id_table, .probe = e100_probe, .remove = e100_remove, -#ifdef CONFIG_PM + /* Power Management hooks */ - .suspend = e100_suspend, - .resume = e100_resume, -#endif + .driver.pm = &e100_pm_ops, + .shutdown = e100_shutdown, .err_handler = &e100_err_handler, }; diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 0b4196d2cdd4..f976e9daa3d8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1356,8 +1356,8 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size &= ~1; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; } static int e1000_check_lbtest_frame(const unsigned char *data, diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 64f684dc6c7a..a8fc9208382c 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1608,8 +1608,8 @@ static void 
e1000_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size &= ~1; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; } static int e1000_check_lbtest_frame(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index ae0a6332fd30..b2f2fcfdf732 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; ret_val = e1000_disable_ulp_lpt_lp(hw, true); - if (ret_val) { + if (ret_val) e_warn("Failed to disable ULP\n"); - goto out; - } ret_val = hw->phy.ops.acquire(hw); if (ret_val) { diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 48c956d90b90..d870343cf689 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3766,7 +3766,6 @@ err_dma: return err; } -#ifdef CONFIG_PM /** * iavf_suspend - Power management suspend routine * @pdev: PCI device information struct @@ -3774,11 +3773,10 @@ err_dma: * * Called when the system (VM) is entering sleep/suspend. **/ -static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused iavf_suspend(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct iavf_adapter *adapter = netdev_priv(netdev); - int retval = 0; netif_device_detach(netdev); @@ -3796,12 +3794,6 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); - retval = pci_save_state(pdev); - if (retval) - return retval; - - pci_disable_device(pdev); - return 0; } @@ -3811,24 +3803,13 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) * * Called when the system (VM) is resumed from sleep/suspend. **/ -static int iavf_resume(struct pci_dev *pdev) +static int __maybe_unused iavf_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct iavf_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. 
- */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n"); - return err; - } pci_set_master(pdev); rtnl_lock(); @@ -3852,7 +3833,6 @@ static int iavf_resume(struct pci_dev *pdev) return err; } -#endif /* CONFIG_PM */ /** * iavf_remove - Device Removal Routine * @pdev: PCI device information struct @@ -3954,16 +3934,15 @@ static void iavf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); + static struct pci_driver iavf_driver = { - .name = iavf_driver_name, - .id_table = iavf_pci_tbl, - .probe = iavf_probe, - .remove = iavf_remove, -#ifdef CONFIG_PM - .suspend = iavf_suspend, - .resume = iavf_resume, -#endif - .shutdown = iavf_shutdown, + .name = iavf_driver_name, + .id_table = iavf_pci_tbl, + .probe = iavf_probe, + .remove = iavf_remove, + .driver.pm = &iavf_pm_ops, + .shutdown = iavf_shutdown, }; /** diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 980bbcc64b4b..6da4f43f2348 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -23,6 +23,7 @@ ice-y := ice_main.o \ ice_flex_pipe.o \ ice_flow.o \ ice_devlink.o \ + ice_fw_update.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index c665220bb637..fe140ff38f74 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -19,6 +19,7 @@ #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/workqueue.h> +#include <linux/wait.h> #include <linux/aer.h> #include <linux/interrupt.h> #include <linux/ethtool.h> @@ -255,6 +256,7 @@ struct ice_vsi { u32 tx_busy; u32 rx_buf_failed; u32 rx_page_failed; + u32 rx_gro_dropped; u16 num_q_vectors; u16 base_vector; /* IRQ base for OS reserved vectors */ enum ice_vsi_type type; @@ -412,6 +414,12 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ struct mutex tc_mutex; /* lock to protect TC changes */ u32 msg_enable; + + /* spinlock to protect the AdminQ wait list */ + spinlock_t aq_wait_lock; + struct hlist_head aq_wait_list; + wait_queue_head_t aq_wait_queue; + u32 hw_csum_rx_error; u16 oicr_idx; /* Other interrupt cause MSIX vector index */ u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ @@ -593,6 +601,8 @@ void ice_fdir_release_flows(struct ice_hw *hw); void ice_fdir_replay_flows(struct ice_hw *hw); void ice_fdir_replay_fltrs(struct ice_pf *pf); int ice_fdir_create_dflt_rules(struct ice_pf *pf); +int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, + struct ice_rq_event_info *event); int ice_open(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b363e0223670..ba9375218fef 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -109,6 +109,13 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_MSIX 0x0043 #define ICE_AQC_CAPS_FD 0x0045 #define ICE_AQC_CAPS_MAX_MTU 0x0047 +#define ICE_AQC_CAPS_NVM_VER 0x0048 +#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049 +#define ICE_AQC_CAPS_OROM_VER 0x004A +#define 
ICE_AQC_CAPS_PENDING_OROM_VER 0x004B +#define ICE_AQC_CAPS_NET_VER 0x004C +#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D +#define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; u8 minor_ver; @@ -1298,7 +1305,14 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) +#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */ +#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4) +#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5) +#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6) +#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ +#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) __le16 module_typeid; __le16 length; @@ -1347,6 +1361,67 @@ struct ice_aqc_nvm_checksum { #define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA #define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F +/* Used for NVM Set Package Data command - 0x070A */ +struct ice_aqc_nvm_pkg_data { + u8 reserved[3]; + u8 cmd_flags; +#define ICE_AQC_NVM_PKG_DELETE BIT(0) /* used for command call */ +#define ICE_AQC_NVM_PKG_SKIPPED BIT(0) /* used for command response */ + + u32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* Used for Pass Component Table command - 0x070B */ +struct ice_aqc_nvm_pass_comp_tbl { + u8 component_response; /* Response only */ +#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0 +#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1 +#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2 + u8 component_response_code; /* Response only */ +#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0 +#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1 +#define ICE_AQ_NVM_PASS_COMP_STAMP_LOWER 0x2 +#define ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3 +#define ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE 0x4 +#define ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5 +#define ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6 +#define ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7 +#define ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8 +#define ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA +#define ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB + u8 reserved; + u8 transfer_flag; +#define ICE_AQ_NVM_PASS_COMP_TBL_START 0x1 +#define ICE_AQ_NVM_PASS_COMP_TBL_MIDDLE 0x2 +#define ICE_AQ_NVM_PASS_COMP_TBL_END 0x4 +#define ICE_AQ_NVM_PASS_COMP_TBL_START_AND_END 0x5 + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_nvm_comp_tbl { + __le16 comp_class; +#define NVM_COMP_CLASS_ALL_FW 0x000A + + __le16 comp_id; +#define NVM_COMP_ID_OROM 0x5 +#define NVM_COMP_ID_NVM 0x6 +#define NVM_COMP_ID_NETLIST 0x8 + + u8 comp_class_idx; +#define FWU_COMP_CLASS_IDX_NOT_USE 0x0 + + __le32 comp_cmp_stamp; + u8 cvs_type; +#define NVM_CVS_TYPE_ASCII 0x1 + + u8 cvs_len; + u8 cvs[]; /* Component Version String */ +} __packed; + /** * Send to PF command (indirect 0x0801) ID is only used by PF * @@ -1506,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys { struct ice_aqc_get_set_rss_lut { #define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 -#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) +#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) __le16 vsi_id; #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 #define 
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ @@ -1794,6 +1869,8 @@ struct ice_aq_desc { struct ice_aqc_rl_profile rl_profile; struct ice_aqc_nvm nvm; struct ice_aqc_nvm_checksum nvm_checksum; + struct ice_aqc_nvm_pkg_data pkg_data; + struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl; struct ice_aqc_pf_vf_msg virt; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; @@ -1922,7 +1999,13 @@ enum ice_adminq_opc { /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, + ice_aqc_opc_nvm_erase = 0x0702, + ice_aqc_opc_nvm_write = 0x0703, ice_aqc_opc_nvm_checksum = 0x0706, + ice_aqc_opc_nvm_write_activate = 0x0707, + ice_aqc_opc_nvm_update_empr = 0x0709, + ice_aqc_opc_nvm_pkg_data = 0x070A, + ice_aqc_opc_nvm_pass_component_tbl = 0x070B, /* PF/VF mailbox commands */ ice_mbx_opc_send_msg_to_pf = 0x0801, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index c72cc77b8d67..34abfcea9858 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -611,8 +611,6 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - status = ice_aq_send_cmd(hw, &desc, config, size, NULL); if (!status) { u16 i; @@ -1029,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw) */ enum ice_status ice_check_reset(struct ice_hw *hw) { - u32 cnt, reg = 0, grst_delay, uld_mask; + u32 cnt, reg = 0, grst_timeout, uld_mask; /* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. * Add 1sec for outstanding AQ commands that can take a long time. */ - grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> - GLGEN_RSTCTL_GRSTDEL_S) + 10; + grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> + GLGEN_RSTCTL_GRSTDEL_S) + 10; - for (cnt = 0; cnt < grst_delay; cnt++) { + for (cnt = 0; cnt < grst_timeout; cnt++) { mdelay(100); reg = rd32(hw, GLGEN_RSTAT); if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) break; } - if (cnt == grst_delay) { + if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; @@ -1720,8 +1718,7 @@ ice_alloc_res_exit: * @num: number of resources * @res: pointer to array that contains the resources to free */ -enum ice_status -ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; enum ice_status status; @@ -1857,6 +1854,25 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; + case ICE_AQC_CAPS_PENDING_NVM_VER: + caps->nvm_update_pending_nvm = true; + ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); + break; + case ICE_AQC_CAPS_PENDING_OROM_VER: + caps->nvm_update_pending_orom = true; + ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); + break; + case ICE_AQC_CAPS_PENDING_NET_VER: + caps->nvm_update_pending_netlist = true; + ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); + break; + case ICE_AQC_CAPS_NVM_MGMT: + caps->nvm_unified_update = + (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
+ true : false; + ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, + caps->nvm_unified_update); + break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", @@ -2104,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, * @cap_count: the number of capabilities * * Helper device to parse device (0x000B) capabilities list. For - * capabilities shared between device and device, this relies on + * capabilities shared between device and function, this relies on * ice_parse_common_caps. * * Loop through the list of provided capabilities and extract the relevant @@ -2204,7 +2220,7 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * Read the device capabilities and extract them into the dev_caps structure * for later use. */ -static enum ice_status +enum ice_status ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { enum ice_status status; @@ -3888,7 +3904,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, * Without setting the generic section as valid in valid_sections, the * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. */ - buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; + buf->txqs[0].info.valid_sections = + ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | + ICE_AQC_ELEM_VALID_EIR; + buf->txqs[0].info.generic = 0; + buf->txqs[0].info.cir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->txqs[0].info.cir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); + buf->txqs[0].info.eir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->txqs[0].info.eir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); /* add the LAN queue */ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); @@ -4338,3 +4365,36 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) return false; } + +/** + * ice_aq_set_lldp_mib - Set the LLDP MIB + * @hw: pointer to the HW struct + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buf: pointer to the caller-supplied buffer to store the MIB block + * @buf_size: size of the buffer (in bytes) + * @cd: pointer to command details structure or NULL + * + * Set the LLDP MIB. 
(0x0A08) + */ +enum ice_status +ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_set_local_mib *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_set_mib; + + if (buf_size == 0 || !buf) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); + + desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); + desc.datalen = cpu_to_le16(buf_size); + + cmd->type = mib_type; + cmd->length = cpu_to_le16(buf_size); + + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 33a681a75439..3ebb973878c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -11,8 +11,6 @@ #include "ice_switch.h" #include <linux/avf/virtchnl.h> -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); - enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); @@ -90,6 +88,8 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, enum ice_status ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); +enum ice_status +ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); @@ -172,4 +172,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); +enum ice_status +ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, + struct ice_sq_cd *cd); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 1e18021aa073..1f46a7828be8 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -312,9 +312,10 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) #define ICE_FREE_CQ_BUFS(hw, qi, ring) \ do { \ - int i; \ /* free descriptors */ \ - if ((qi)->ring.r.ring##_bi) \ + if ((qi)->ring.r.ring##_bi) { \ + int i; \ + \ for (i = 0; i < (qi)->num_##ring##_entries; i++) \ if ((qi)->ring.r.ring##_bi[i].pa) { \ dmam_free_coherent(ice_hw_to_dev(hw), \ @@ -325,6 +326,7 @@ do { \ (qi)->ring.r.ring##_bi[i].pa = 0;\ (qi)->ring.r.ring##_bi[i].size = 0;\ } \ + } \ /* free the buffer info list */ \ if ((qi)->ring.cmd_buf) \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 2cecc9d08005..2a3147ee0bbb 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -135,39 +135,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) } /** - * ice_aq_set_lldp_mib - Set the LLDP MIB - * @hw: pointer to the HW struct - * @mib_type: Local, Remote or both Local and Remote MIBs - * @buf: pointer to the caller-supplied buffer to store the MIB block - * @buf_size: size of the buffer (in bytes) - * @cd: pointer to command details structure or NULL - * - * Set the LLDP MIB. 
(0x0A08) - */ -static enum ice_status -ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, - struct ice_sq_cd *cd) -{ - struct ice_aqc_lldp_set_local_mib *cmd; - struct ice_aq_desc desc; - - cmd = &desc.params.lldp_set_mib; - - if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); - - desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); - desc.datalen = cpu_to_le16(buf_size); - - cmd->type = mib_type; - cmd->length = cpu_to_le16(buf_size); - - return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); -} - -/** * ice_get_dcbx_status * @hw: pointer to the HW struct * diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 979af197f8a3..36abd6b7280c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -444,10 +444,6 @@ void ice_dcb_rebuild(struct ice_pf *pf) goto dcb_error; } - /* If DCB was not enabled previously, we are done */ - if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags)) - return; - mutex_lock(&pf->tc_mutex); if (!pf->hw.port_info->is_sw_lldp) @@ -467,7 +463,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) } } - dev_info(dev, "DCB restored after reset\n"); + dev_info(dev, "DCB info restored\n"); ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { dev_err(dev, "Query Port ETS failed\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 323238669572..35c21d9ae009 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -53,6 +53,12 @@ ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { tlan_ctx->cgd_num = ring->dcb_tc; } + +static inline bool ice_is_dcb_active(struct ice_pf *pf) +{ + return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) || + test_bit(ICE_FLAG_DCB_ENA, pf->flags)); +} #else #define ice_dcb_rebuild(pf) do {} while (0) @@ -95,6 +101,11 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, return 0; } +static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf) +{ + return false; +} + static inline bool ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, unsigned int __always_unused txqueue) diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 43da2dcb0cbc..111d6bfe4222 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -4,6 +4,7 @@ #include "ice.h" #include "ice_lib.h" #include "ice_devlink.h" +#include "ice_fw_update.h" static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len) { @@ -229,8 +230,61 @@ static int ice_devlink_info_get(struct devlink *devlink, return 0; } +/** + * ice_devlink_flash_update - Update firmware stored in flash on the device + * @devlink: pointer to devlink associated with device to update + * @path: the path of the firmware file to use via request_firmware + * @component: name of the component to update, or NULL + * @extack: netlink extended ACK structure + * + * Perform a device flash update. The bulk of the update logic is contained + * within the ice_flash_pldm_image function. + * + * Returns: zero on success, or an error code on failure. 
+ */ +static int +ice_devlink_flash_update(struct devlink *devlink, const char *path, + const char *component, struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = &pf->pdev->dev; + struct ice_hw *hw = &pf->hw; + const struct firmware *fw; + int err; + + /* individual component update is not yet supported */ + if (component) + return -EOPNOTSUPP; + + if (!hw->dev_caps.common_cap.nvm_unified_update) { + NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); + return -EOPNOTSUPP; + } + + err = ice_check_for_pending_update(pf, component, extack); + if (err) + return err; + + err = request_firmware(&fw, path, dev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk"); + return err; + } + + devlink_flash_update_begin_notify(devlink); + devlink_flash_update_status_notify(devlink, "Preparing to flash", + component, 0, 0); + err = ice_flash_pldm_image(pf, fw, extack); + devlink_flash_update_end_notify(devlink); + + release_firmware(fw); + + return err; +} + static const struct devlink_ops ice_devlink_ops = { .info_get = ice_devlink_info_get, + .flash_update = ice_devlink_flash_update, }; static void ice_devlink_free(void *devlink_ptr) @@ -303,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf) * * Create and register a devlink_port for this PF. Note that although each * physical function is connected to a separate devlink instance, the port - * will still be numbered according to the physical function id. + * will still be numbered according to the physical function ID. * * Return: zero on success or an error code on failure. */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 06b93e97892d..9e8e9531cd87 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = { ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed), ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), + ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped), ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), ICE_VSI_STAT("tx_linearize", tx_linearize), + ICE_VSI_STAT("tx_busy", tx_busy), + ICE_VSI_STAT("tx_restart", tx_restart), }; enum ice_ethtool_test_id { @@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast), ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast), ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors), + ICE_PF_STAT("tx_timeout.nic", tx_timeout_count), ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64), ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64), ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127), diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 3c217e51b27e..b17ae3e20157 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * This function generates a key from a value, a don't care mask and a never * match mask. 
* upd, dc, and nm are optional parameters, and can be NULL: - * upd == NULL --> udp mask is all 1's (update all bits) + * upd == NULL --> upd mask is all 1's (update all bits) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ @@ -2921,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) ICE_FLOW_ENTRY_HNDL(e)); list_del(&p->l_entry); + + mutex_destroy(&p->entries_lock); devm_kfree(ice_hw_to_dev(hw), p); } mutex_unlock(&hw->fl_profs_locks[blk_idx]); @@ -3038,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(prof_redir->t, 0, prof_redir->count * sizeof(*prof_redir->t)); - memset(es->t, 0, es->count * sizeof(*es->t)); + memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); memset(es->written, 0, es->count * sizeof(*es->written)); } @@ -3149,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->ref_count), GFP_KERNEL); + if (!es->ref_count) + goto err; es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); - if (!es->ref_count) + if (!es->written) goto err; } return 0; @@ -3874,16 +3878,16 @@ err_ice_add_prof: } /** - * ice_search_prof_id_low - Search for a profile tracking ID low level + * ice_search_prof_id - Search for a profile tracking ID * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * - * This will search for a profile tracking ID which was previously added. This - * version assumes that the caller has already acquired the prof map lock. + * This will search for a profile tracking ID which was previously added. + * The profile map lock should be held before calling this function. */ static struct ice_prof_map * -ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; struct ice_prof_map *map; @@ -3898,26 +3902,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) } /** - * ice_search_prof_id - Search for a profile tracking ID - * @hw: pointer to the HW struct - * @blk: hardware block - * @id: profile tracking ID - * - * This will search for a profile tracking ID which was previously added. 
- */ -static struct ice_prof_map * -ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) -{ - struct ice_prof_map *entry; - - mutex_lock(&hw->blk[blk].es.prof_map_lock); - entry = ice_search_prof_id_low(hw, blk, id); - mutex_unlock(&hw->blk[blk].es.prof_map_lock); - - return entry; -} - -/** * ice_vsig_prof_id_count - count profiles in a VSIG * @hw: pointer to the HW struct * @blk: hardware block @@ -4133,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) mutex_lock(&hw->blk[blk].es.prof_map_lock); - pmap = ice_search_prof_id_low(hw, blk, id); + pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_rem_prof; @@ -4166,22 +4150,28 @@ static enum ice_status ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct list_head *chg) { + enum ice_status status = 0; struct ice_prof_map *map; struct ice_chs_chg *p; u16 i; + mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_get_prof; + } for (i = 0; i < map->ptg_cnt; i++) if (!hw->blk[blk].es.written[map->prof_id]) { /* add ES to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_get_prof; + } p->type = ICE_PTG_ES_ADD; p->ptype = 0; @@ -4196,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, list_add(&p->list_entry, chg); } - return 0; - err_ice_get_prof: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ - return ICE_ERR_NO_MEMORY; + return status; } /** @@ -4254,17 +4243,23 @@ static enum ice_status ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct list_head *lst, u64 hdl) { + enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *p; u16 i; + mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_to_lst; + } p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) - return ICE_ERR_NO_MEMORY; + if (!p) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_prof_to_lst; + } p->profile_cookie = map->profile_cookie; p->prof_id = map->prof_id; @@ -4278,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, list_add(&p->list, lst); - return 0; +err_ice_add_prof_to_lst: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); + return status; } /** @@ -4496,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; + enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; - /* Get the details on the profile specified by the handle ID */ - map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; - /* Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) return ICE_ERR_ALREADY_EXISTS; @@ -4515,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, if (!t) return ICE_ERR_NO_MEMORY; + 
mutex_lock(&hw->blk[blk].es.prof_map_lock); + /* Get the details on the profile specified by the handle ID */ + map = ice_search_prof_id(hw, blk, hdl); + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_id_vsig; + } + t->profile_cookie = map->profile_cookie; t->prof_id = map->prof_id; t->tcam_count = map->ptg_cnt; /* create TCAM entries */ for (i = 0; i < map->ptg_cnt; i++) { - enum ice_status status; u16 tcam_idx; /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof_id_vsig; + } /* allocate the TCAM entry index */ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); @@ -4571,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, list_add(&t->list, &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); - return 0; + mutex_unlock(&hw->blk[blk].es.prof_map_lock); + return status; err_ice_add_prof_id_vsig: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ devm_kfree(ice_hw_to_dev(hw), t); - return ICE_ERR_NO_MEMORY; + return status; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index d74e5290677f..fe677621dd51 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) if (list_empty(&hw->fl_profs[blk])) return 0; - mutex_lock(&hw->fl_profs_locks[blk]); + mutex_lock(&hw->rss_locks); list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry) if (test_bit(vsi_handle, p->vsis)) { status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle); @@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) break; if (bitmap_empty(p->vsis, ICE_MAX_VSI)) { - status = ice_flow_rem_prof_sync(hw, blk, p); + status = ice_flow_rem_prof(hw, blk, p->id); if (status) break; } } - mutex_unlock(&hw->fl_profs_locks[blk]); + mutex_unlock(&hw->rss_locks); return status; } @@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) */ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) { - struct ice_rss_cfg *r, *rss_cfg = NULL; + u64 rss_hash = ICE_HASH_INVALID; + struct ice_rss_cfg *r; /* verify if the protocol header is non zero and VSI is valid */ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) @@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) list_for_each_entry(r, &hw->rss_list_head, l_entry) if (test_bit(vsi_handle, r->vsis) && r->packet_hdr == hdrs) { - rss_cfg = r; + rss_hash = r->hashed_flds; break; } mutex_unlock(&hw->rss_locks); - return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID; + return rss_hash; } diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c new file mode 100644 index 000000000000..deaefe00c9c0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2019, Intel Corporation. 
*/ + +#include <asm/unaligned.h> +#include <linux/uuid.h> +#include <linux/crc32.h> +#include <linux/pldmfw.h> +#include "ice.h" +#include "ice_fw_update.h" + +struct ice_fwu_priv { + struct pldmfw context; + + struct ice_pf *pf; + struct netlink_ext_ack *extack; + + /* Track which NVM banks to activate at the end of the update */ + u8 activate_flags; +}; + +/** + * ice_send_package_data - Send record package data to firmware + * @context: PLDM fw update structure + * @data: pointer to the package data + * @length: length of the package data + * + * Send a copy of the package data associated with the PLDM record matching + * this device to the firmware. + * + * Note that this function sends an AdminQ command that will fail unless the + * NVM resource has been acquired. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct device *dev = context->dev; + struct ice_pf *pf = priv->pf; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u8 *package_data; + + package_data = kmemdup(data, length, GFP_KERNEL); + if (!package_data) + return -ENOMEM; + + status = ice_nvm_set_pkg_data(hw, false, package_data, length, NULL); + + kfree(package_data); + + if (status) { + dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware"); + return -EIO; + } + + return 0; +} + +/** + * ice_check_component_response - Report firmware response to a component + * @pf: device private data structure + * @id: component id being checked + * @response: indicates whether this component can be updated + * @code: code indicating reason for response + * @extack: netlink extended ACK structure + * + * Check whether firmware indicates if this component can be updated. Report + * a suitable error message over the netlink extended ACK if the component + * cannot be updated. + * + * Returns: zero if the component can be updated, or -ECANCELED of the + * firmware indicates the component cannot be updated. + */ +static int +ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + const char *component; + + switch (id) { + case NVM_COMP_ID_OROM: + component = "fw.undi"; + break; + case NVM_COMP_ID_NVM: + component = "fw.mgmt"; + break; + case NVM_COMP_ID_NETLIST: + component = "fw.netlist"; + break; + default: + WARN(1, "Unexpected unknown component identifier 0x%02x", id); + return -EINVAL; + } + + dev_dbg(dev, "%s: firmware response 0x%x, code 0x%x\n", + component, response, code); + + switch (response) { + case ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED: + /* firmware indicated this update is good to proceed */ + return 0; + case ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE: + dev_warn(dev, "firmware recommends not updating %s, as it may result in a downgrade. 
continuing anyways\n", component); + return 0; + case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED: + dev_info(dev, "firmware has rejected updating %s\n", component); + break; + } + + switch (code) { + case ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE: + dev_err(dev, "Component comparison stamp for %s is identical to the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is identical to running image"); + break; + case ICE_AQ_NVM_PASS_COMP_STAMP_LOWER: + dev_err(dev, "Component comparison stamp for %s is lower than the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is lower than running image"); + break; + case ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE: + dev_err(dev, "Component comparison stamp for %s is invalid\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is invalid"); + break; + case ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE: + dev_err(dev, "%s conflicts with a previous component table\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component table conflict occurred"); + break; + case ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE: + dev_err(dev, "Pre-requisites for component %s have not been met\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met"); + break; + case ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE: + dev_err(dev, "%s is not a supported component\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component not supported"); + break; + case ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE: + dev_err(dev, "Security restrictions prevent %s from being downgraded\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded"); + break; + case ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE: + dev_err(dev, "Received an incomplete component image for %s\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Incomplete component image"); + break; + case ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE: + dev_err(dev, "Component version for %s is identical to the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component version is identical to running image"); + break; + case ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE: + dev_err(dev, "Component version for %s is lower than the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component version is lower than the running image"); + break; + default: + dev_err(dev, "Unexpected response code 0x02%x for %s\n", + code, component); + NL_SET_ERR_MSG_MOD(extack, "Received unexpected response code from firmware"); + break; + } + + return -ECANCELED; +} + +/** + * ice_send_component_table - Send PLDM component table to firmware + * @context: PLDM fw update structure + * @component: the component to process + * @transfer_flag: relative transfer order of this component + * + * Read relevant data from the component and forward it to the device + * firmware. Check the response to determine if the firmware indicates that + * the update can proceed. + * + * This function sends AdminQ commands related to the NVM, and assumes that + * the NVM resource has been acquired. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +ice_send_component_table(struct pldmfw *context, struct pldmfw_component *component, + u8 transfer_flag) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct ice_aqc_nvm_comp_tbl *comp_tbl; + u8 comp_response, comp_response_code; + struct device *dev = context->dev; + struct ice_pf *pf = priv->pf; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + size_t length; + + switch (component->identifier) { + case NVM_COMP_ID_OROM: + case NVM_COMP_ID_NVM: + case NVM_COMP_ID_NETLIST: + break; + default: + dev_err(dev, "Unable to update due to a firmware component with unknown ID %u\n", + component->identifier); + NL_SET_ERR_MSG_MOD(extack, "Unable to update due to unknown firmware component"); + return -EOPNOTSUPP; + } + + length = struct_size(comp_tbl, cvs, component->version_len); + comp_tbl = kzalloc(length, GFP_KERNEL); + if (!comp_tbl) + return -ENOMEM; + + comp_tbl->comp_class = cpu_to_le16(component->classification); + comp_tbl->comp_id = cpu_to_le16(component->identifier); + comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE; + comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp); + comp_tbl->cvs_type = component->version_type; + comp_tbl->cvs_len = component->version_len; + memcpy(comp_tbl->cvs, component->version_string, component->version_len); + + status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length, + transfer_flag, &comp_response, + &comp_response_code, NULL); + + kfree(comp_tbl); + + if (status) { + dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware"); + return -EIO; + } + + return ice_check_component_response(pf, component->identifier, comp_response, + comp_response_code, extack); +} + +/** + * ice_write_one_nvm_block - Write an NVM block and await completion response + * @pf: the PF data structure + * @module: the module to write to + * @offset: offset in bytes + * @block_size: size of the block to write, up to 4k + * @block: pointer to block of data to write + * @last_cmd: whether this is the last command + * @extack: netlink extended ACK structure + * + * Write a block of data to a flash module, and await for the completion + * response message from firmware. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + struct netlink_ext_ack *extack) +{ + u16 completion_module, completion_retval; + struct device *dev = ice_pf_to_dev(pf); + struct ice_rq_event_info event; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u32 completion_offset; + int err; + + memset(&event, 0, sizeof(event)); + + status = ice_aq_update_nvm(hw, module, offset, block_size, block, + last_cmd, 0, NULL); + if (status) { + dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n", + module, offset, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module"); + return -EIO; + } + + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event); + if (err) { + dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n", + module, err); + NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); + return -EIO; + } + + completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); + completion_retval = le16_to_cpu(event.desc.retval); + + completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low); + completion_offset |= event.desc.params.nvm.offset_high << 16; + + if (completion_module != module) { + dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n", + completion_module, module); + NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response"); + return -EIO; + } + + if (completion_offset != offset) { + dev_err(dev, "Unexpected offset in write completion: got %u, expected %u\n", + completion_offset, offset); + NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response"); + return -EIO; + } + + if (completion_retval) { + dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n", + module, offset, + ice_aq_str((enum ice_aq_err)completion_retval)); + NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module"); + return -EIO; + } + + return 0; +} + +/** + * ice_write_nvm_module - Write data to an NVM module + * @pf: the PF driver structure + * @module: the module id to program + * @component: the name of the component being updated + * @image: buffer of image data to write to the NVM + * @length: length of the buffer + * @extack: netlink extended ACK structure + * + * Loop over the data for a given NVM module and program it in 4 Kb + * blocks. Notify devlink core of progress after each block is programmed. + * Loops over a block of data and programs the NVM in 4k block chunks. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, + const u8 *image, u32 length, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink; + u32 offset = 0; + bool last_cmd; + u8 *block; + int err; + + devlink = priv_to_devlink(pf); + + devlink_flash_update_status_notify(devlink, "Flashing", + component, 0, length); + + block = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!block) + return -ENOMEM; + + do { + u32 block_size; + + block_size = min_t(u32, ICE_AQ_MAX_BUF_LEN, length - offset); + last_cmd = !(offset + block_size < length); + + /* ice_aq_update_nvm may copy the firmware response into the + * buffer, so we must make a copy since the source data is + * constant. 
+ */ + memcpy(block, image + offset, block_size); + + err = ice_write_one_nvm_block(pf, module, offset, block_size, + block, last_cmd, extack); + if (err) + break; + + offset += block_size; + + devlink_flash_update_status_notify(devlink, "Flashing", + component, offset, length); + } while (!last_cmd); + + if (err) + devlink_flash_update_status_notify(devlink, "Flashing failed", + component, length, length); + else + devlink_flash_update_status_notify(devlink, "Flashing done", + component, length, length); + + kfree(block); + return err; +} + +/** + * ice_erase_nvm_module - Erase an NVM module and await firmware completion + * @pf: the PF data structure + * @module: the module to erase + * @component: name of the component being updated + * @extack: netlink extended ACK structure + * + * Erase the inactive NVM bank associated with this module, and await for + * a completion response message from firmware. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, + struct netlink_ext_ack *extack) +{ + u16 completion_module, completion_retval; + struct device *dev = ice_pf_to_dev(pf); + struct ice_rq_event_info event; + struct ice_hw *hw = &pf->hw; + struct devlink *devlink; + enum ice_status status; + int err; + + memset(&event, 0, sizeof(event)); + + devlink = priv_to_devlink(pf); + + devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0); + + status = ice_aq_erase_nvm(hw, module, NULL); + if (status) { + dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n", + component, module, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module"); + err = -EIO; + goto out_notify_devlink; + } + + /* Yes, this really can take minutes to complete */ + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event); + if (err) { + dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n", + component, module, err); + NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); + goto out_notify_devlink; + } + + completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); + completion_retval = le16_to_cpu(event.desc.retval); + + if (completion_module != module) { + dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n", + component, completion_module, module); + NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response"); + err = -EIO; + goto out_notify_devlink; + } + + if (completion_retval) { + dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n", + component, module, + ice_aq_str((enum ice_aq_err)completion_retval)); + NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash"); + err = -EIO; + goto out_notify_devlink; + } + +out_notify_devlink: + if (err) + devlink_flash_update_status_notify(devlink, "Erasing failed", + component, 0, 0); + else + devlink_flash_update_status_notify(devlink, "Erasing done", + component, 0, 0); + + return err; +} + +/** + * ice_switch_flash_banks - Tell firmware to switch NVM banks + * @pf: Pointer to the PF data structure + * @activate_flags: flags used for the activation command + * @extack: netlink extended ACK structure + * + * Notify firmware to activate the newly written flash banks, and wait for the + * firmware response. 
+ * + * Returns: zero on success or an error code on failure. + */ +static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_rq_event_info event; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u16 completion_retval; + int err; + + memset(&event, 0, sizeof(event)); + + status = ice_nvm_write_activate(hw, activate_flags); + if (status) { + dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks"); + return -EIO; + } + + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, HZ, + &event); + if (err) { + dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n", + err); + NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); + return err; + } + + completion_retval = le16_to_cpu(event.desc.retval); + if (completion_retval) { + dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n", + ice_aq_str((enum ice_aq_err)completion_retval)); + NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks"); + return -EIO; + } + + return 0; +} + +/** + * ice_flash_component - Flash a component of the NVM + * @context: PLDM fw update structure + * @component: the component table to program + * + * Program the flash contents for a given component. First, determine the + * module id. Then, erase the secondary bank for this module. Finally, write + * the contents of the component to the NVM. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct ice_pf *pf = priv->pf; + const char *name; + u16 module; + u8 flag; + int err; + + switch (component->identifier) { + case NVM_COMP_ID_OROM: + module = ICE_SR_1ST_OROM_BANK_PTR; + flag = ICE_AQC_NVM_ACTIV_SEL_OROM; + name = "fw.undi"; + break; + case NVM_COMP_ID_NVM: + module = ICE_SR_1ST_NVM_BANK_PTR; + flag = ICE_AQC_NVM_ACTIV_SEL_NVM; + name = "fw.mgmt"; + break; + case NVM_COMP_ID_NETLIST: + module = ICE_SR_NETLIST_BANK_PTR; + flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST; + name = "fw.netlist"; + break; + default: + /* This should not trigger, since we check the id before + * sending the component table to firmware. + */ + WARN(1, "Unexpected unknown component identifier 0x%02x", + component->identifier); + return -EINVAL; + } + + /* Mark this component for activating at the end */ + priv->activate_flags |= flag; + + err = ice_erase_nvm_module(pf, module, name, extack); + if (err) + return err; + + return ice_write_nvm_module(pf, module, name, component->component_data, + component->component_size, extack); +} + +/** + * ice_finalize_update - Perform last steps to complete device update + * @context: PLDM fw update structure + * + * Called as the last step of the update process. Complete the update by + * telling the firmware to switch active banks, and perform a reset of + * configured. + * + * Returns: 0 on success, or an error code on failure. 
+ */ +static int ice_finalize_update(struct pldmfw *context) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct ice_pf *pf = priv->pf; + int err; + + /* Finally, notify firmware to activate the written NVM banks */ + err = ice_switch_flash_banks(pf, priv->activate_flags, extack); + if (err) + return err; + + return 0; +} + +static const struct pldmfw_ops ice_fwu_ops = { + .match_record = &pldmfw_op_pci_match_record, + .send_package_data = &ice_send_package_data, + .send_component_table = &ice_send_component_table, + .flash_component = &ice_flash_component, + .finalize_update = &ice_finalize_update, +}; + +/** + * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device + * @pf: private device driver structure + * @fw: firmware object pointing to the relevant firmware file + * @extack: netlink extended ACK structure + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the firmware. Extract and write the flash data for each of the three + * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify + * firmware once the data is written to the inactive banks. + * + * Returns: zero on success or a negative error code on failure. + */ +int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + struct ice_fwu_priv priv; + enum ice_status status; + int err; + + memset(&priv, 0, sizeof(priv)); + + priv.context.ops = &ice_fwu_ops; + priv.context.dev = dev; + priv.extack = extack; + priv.pf = pf; + priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL; + + status = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (status) { + dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); + return -EIO; + } + + err = pldmfw_flash_image(&priv.context, fw); + + ice_release_nvm(hw); + + return err; +} + +/** + * ice_check_for_pending_update - Check for a pending flash update + * @pf: the PF driver structure + * @component: if not NULL, the name of the component being updated + * @extack: Netlink extended ACK structure + * + * Check whether the device already has a pending flash update. If such an + * update is found, cancel it so that the requested update may proceed. + * + * Returns: zero on success, or a negative error code on failure. + */ +int ice_check_for_pending_update(struct ice_pf *pf, const char *component, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw_dev_caps *dev_caps; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u8 pending = 0; + int err; + + dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL); + if (!dev_caps) + return -ENOMEM; + + /* Read the most recent device capabilities from firmware. Do not use + * the cached values in hw->dev_caps, because the pending update flag + * may have changed, e.g. if an update was previously completed and + * the system has not yet rebooted. 
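+ * In that case the cached copy would still show no pending update even + * though the freshly read capabilities now report, for instance, + * nvm_update_pending_nvm.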
+ */ + status = ice_discover_dev_caps(hw, dev_caps); + if (status) { + NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities"); + kfree(dev_caps); + return -EIO; + } + + if (dev_caps->common_cap.nvm_update_pending_nvm) { + dev_info(dev, "The fw.mgmt flash component has a pending update\n"); + pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; + } + + if (dev_caps->common_cap.nvm_update_pending_orom) { + dev_info(dev, "The fw.undi flash component has a pending update\n"); + pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; + } + + if (dev_caps->common_cap.nvm_update_pending_netlist) { + dev_info(dev, "The fw.netlist flash component has a pending update\n"); + pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; + } + + kfree(dev_caps); + + /* If the flash_update request is for a specific component, ignore all + * of the other components. + */ + if (component) { + if (strcmp(component, "fw.mgmt") == 0) + pending &= ICE_AQC_NVM_ACTIV_SEL_NVM; + else if (strcmp(component, "fw.undi") == 0) + pending &= ICE_AQC_NVM_ACTIV_SEL_OROM; + else if (strcmp(component, "fw.netlist") == 0) + pending &= ICE_AQC_NVM_ACTIV_SEL_NETLIST; + else + WARN(1, "Unexpected flash component %s", component); + } + + /* There is no previous pending update, so this request may continue */ + if (!pending) + return 0; + + /* In order to allow overwriting a previous pending update, notify + * firmware to cancel that update by issuing the appropriate command. + */ + devlink_flash_update_status_notify(devlink, + "Canceling previous pending update", + component, 0, 0); + + status = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (status) { + dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); + return -EIO; + } + + pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV; + err = ice_switch_flash_banks(pf, pending, extack); + + ice_release_nvm(hw); + + return err; +} diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h new file mode 100644 index 000000000000..79472cc618b4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2019, Intel Corporation. 
*/ + +#ifndef _ICE_FW_UPDATE_H_ +#define _ICE_FW_UPDATE_H_ + +int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, + struct netlink_ext_ack *extack); +int ice_check_for_pending_update(struct ice_pf *pf, const char *component, + struct netlink_ext_ack *extack); + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 92e4abca62a4..90abc8612a6a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -57,7 +57,7 @@ #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) -#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */ +#define PRTDCB_TUP2TC 0x001D26C0 #define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) #define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) @@ -362,6 +362,7 @@ #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define PRTRPB_RDPC 0x000AC260 #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT_FD_GCNT_S 0 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) @@ -378,6 +379,5 @@ #define PFPM_WUS_FW_RST_WK_M BIT(31) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) -#define PRTRPB_RDPC 0x000AC260 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 14dfbbc1b2cf..4ec24c3e813f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -601,6 +601,7 @@ struct ice_tlan_ctx { /* shorter macros makes the table fit but are terse */ #define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG +#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG /* Lookup table mapping the HW PTYPE to the bit field for decoding */ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { @@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { ICE_PTT_UNUSED_ENTRY(0), ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(3), + ICE_PTT_UNUSED_ENTRY(4), + ICE_PTT_UNUSED_ENTRY(5), + ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(8), + ICE_PTT_UNUSED_ENTRY(9), + ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(12), + ICE_PTT_UNUSED_ENTRY(13), + ICE_PTT_UNUSED_ENTRY(14), + ICE_PTT_UNUSED_ENTRY(15), + ICE_PTT_UNUSED_ENTRY(16), + ICE_PTT_UNUSED_ENTRY(17), + ICE_PTT_UNUSED_ENTRY(18), + ICE_PTT_UNUSED_ENTRY(19), + ICE_PTT_UNUSED_ENTRY(20), + ICE_PTT_UNUSED_ENTRY(21), + + /* Non Tunneled IPv4 */ + ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(25), + ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, 
PAY4), + ICE_PTT_UNUSED_ENTRY(32), + ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(39), + ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(47), + ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(54), + ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(62), + ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(69), + ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(77), + ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, 
PAY4), + ICE_PTT_UNUSED_ENTRY(84), + ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + ICE_PTT_UNUSED_ENTRY(91), + ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(98), + ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(105), + ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(113), + ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(120), + ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(128), + ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(135), + ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), 
+ + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(143), + ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(150), + ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + ICE_PTT_UNUSED_ENTRY(154), + ICE_PTT_UNUSED_ENTRY(155), + ICE_PTT_UNUSED_ENTRY(156), + ICE_PTT_UNUSED_ENTRY(157), + ICE_PTT_UNUSED_ENTRY(158), + ICE_PTT_UNUSED_ENTRY(159), + + ICE_PTT_UNUSED_ENTRY(160), + ICE_PTT_UNUSED_ENTRY(161), + ICE_PTT_UNUSED_ENTRY(162), + ICE_PTT_UNUSED_ENTRY(163), + ICE_PTT_UNUSED_ENTRY(164), + ICE_PTT_UNUSED_ENTRY(165), + ICE_PTT_UNUSED_ENTRY(166), + ICE_PTT_UNUSED_ENTRY(167), + ICE_PTT_UNUSED_ENTRY(168), + ICE_PTT_UNUSED_ENTRY(169), + + ICE_PTT_UNUSED_ENTRY(170), + ICE_PTT_UNUSED_ENTRY(171), + ICE_PTT_UNUSED_ENTRY(172), + ICE_PTT_UNUSED_ENTRY(173), + ICE_PTT_UNUSED_ENTRY(174), + ICE_PTT_UNUSED_ENTRY(175), + ICE_PTT_UNUSED_ENTRY(176), + ICE_PTT_UNUSED_ENTRY(177), + ICE_PTT_UNUSED_ENTRY(178), + ICE_PTT_UNUSED_ENTRY(179), + + ICE_PTT_UNUSED_ENTRY(180), + ICE_PTT_UNUSED_ENTRY(181), + ICE_PTT_UNUSED_ENTRY(182), + ICE_PTT_UNUSED_ENTRY(183), + ICE_PTT_UNUSED_ENTRY(184), + ICE_PTT_UNUSED_ENTRY(185), + ICE_PTT_UNUSED_ENTRY(186), + ICE_PTT_UNUSED_ENTRY(187), + ICE_PTT_UNUSED_ENTRY(188), + ICE_PTT_UNUSED_ENTRY(189), + + ICE_PTT_UNUSED_ENTRY(190), + ICE_PTT_UNUSED_ENTRY(191), + ICE_PTT_UNUSED_ENTRY(192), + ICE_PTT_UNUSED_ENTRY(193), + ICE_PTT_UNUSED_ENTRY(194), + ICE_PTT_UNUSED_ENTRY(195), + ICE_PTT_UNUSED_ENTRY(196), + ICE_PTT_UNUSED_ENTRY(197), + ICE_PTT_UNUSED_ENTRY(198), + ICE_PTT_UNUSED_ENTRY(199), + + ICE_PTT_UNUSED_ENTRY(200), + ICE_PTT_UNUSED_ENTRY(201), + ICE_PTT_UNUSED_ENTRY(202), + ICE_PTT_UNUSED_ENTRY(203), + ICE_PTT_UNUSED_ENTRY(204), + ICE_PTT_UNUSED_ENTRY(205), + ICE_PTT_UNUSED_ENTRY(206), + ICE_PTT_UNUSED_ENTRY(207), + ICE_PTT_UNUSED_ENTRY(208), + ICE_PTT_UNUSED_ENTRY(209), + + ICE_PTT_UNUSED_ENTRY(210), + ICE_PTT_UNUSED_ENTRY(211), + ICE_PTT_UNUSED_ENTRY(212), + ICE_PTT_UNUSED_ENTRY(213), + ICE_PTT_UNUSED_ENTRY(214), + ICE_PTT_UNUSED_ENTRY(215), + ICE_PTT_UNUSED_ENTRY(216), + ICE_PTT_UNUSED_ENTRY(217), + ICE_PTT_UNUSED_ENTRY(218), + ICE_PTT_UNUSED_ENTRY(219), + + ICE_PTT_UNUSED_ENTRY(220), + ICE_PTT_UNUSED_ENTRY(221), + ICE_PTT_UNUSED_ENTRY(222), + ICE_PTT_UNUSED_ENTRY(223), + ICE_PTT_UNUSED_ENTRY(224), + ICE_PTT_UNUSED_ENTRY(225), + ICE_PTT_UNUSED_ENTRY(226), + ICE_PTT_UNUSED_ENTRY(227), + ICE_PTT_UNUSED_ENTRY(228), + ICE_PTT_UNUSED_ENTRY(229), + + ICE_PTT_UNUSED_ENTRY(230), + ICE_PTT_UNUSED_ENTRY(231), + ICE_PTT_UNUSED_ENTRY(232), + ICE_PTT_UNUSED_ENTRY(233), + ICE_PTT_UNUSED_ENTRY(234), + 
ICE_PTT_UNUSED_ENTRY(235), + ICE_PTT_UNUSED_ENTRY(236), + ICE_PTT_UNUSED_ENTRY(237), + ICE_PTT_UNUSED_ENTRY(238), + ICE_PTT_UNUSED_ENTRY(239), + + ICE_PTT_UNUSED_ENTRY(240), + ICE_PTT_UNUSED_ENTRY(241), + ICE_PTT_UNUSED_ENTRY(242), + ICE_PTT_UNUSED_ENTRY(243), + ICE_PTT_UNUSED_ENTRY(244), + ICE_PTT_UNUSED_ENTRY(245), + ICE_PTT_UNUSED_ENTRY(246), + ICE_PTT_UNUSED_ENTRY(247), + ICE_PTT_UNUSED_ENTRY(248), + ICE_PTT_UNUSED_ENTRY(249), + + ICE_PTT_UNUSED_ENTRY(250), + ICE_PTT_UNUSED_ENTRY(251), + ICE_PTT_UNUSED_ENTRY(252), + ICE_PTT_UNUSED_ENTRY(253), + ICE_PTT_UNUSED_ENTRY(254), + ICE_PTT_UNUSED_ENTRY(255), }; static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8f6a191839f1..f2682776f8c8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -127,8 +127,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) case ICE_VSI_PF: case ICE_VSI_CTRL: case ICE_VSI_LB: - vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; - vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; + /* a user could change the values of num_[tr]x_desc using + * ethtool -G so we should keep those values instead of + * overwriting them with the defaults. + */ + if (!vsi->num_rx_desc) + vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; + if (!vsi->num_tx_desc) + vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; break; default: dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", @@ -2011,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) if (!vsi) return -EINVAL; + /* Don't enable VLAN pruning if the netdev is currently in promiscuous + * mode. VLAN pruning will be enabled when the interface exits + * promiscuous mode if any VLAN filters are active. + */ + if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena) + return 0; + pf = vsi->back; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 231f4b6e93d0..8437d72795b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -369,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) ~IFF_PROMISC; goto out_promisc; } + ice_cfg_vlan_pruning(vsi, false, false); } } else { /* Clear Rx filter to remove traffic from wire */ @@ -381,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) IFF_PROMISC; goto out_promisc; } + if (vsi->num_vlan > 1) + ice_cfg_vlan_pruning(vsi, true, false); } } } @@ -767,6 +770,100 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) } /** + * ice_set_dflt_mib - send a default config MIB to the FW + * @pf: private PF struct + * + * This function sends a default configuration MIB to the FW. + * + * If this function errors out at any point, the driver is still able to + * function. The main impact is that LFC may not operate as expected. + * Therefore an error state in this function is only logged at DBG level, + * and the driver continues on with the rebuild/reenable.
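+ * + * The MIB built below packs three organizationally specific TLVs back to + * back into one buffer, in this order: + * + * [ ETS CFG ][ ETS REC ][ PFC CFG ] + * + * with all UPs mapped to TC 0, 100% bandwidth assigned to TC 0, and PFC + * disabled.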
+ */ +static void ice_set_dflt_mib(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + u8 mib_type, *buf, *lldpmib = NULL; + u16 len, typelen, offset = 0; + struct ice_lldp_org_tlv *tlv; + struct ice_hw *hw = &pf->hw; + u32 ouisubtype; + + mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; + lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); + if (!lldpmib) { + dev_dbg(dev, "%s Failed to allocate MIB memory\n", + __func__); + return; + } + + /* Add ETS CFG TLV */ + tlv = (struct ice_lldp_org_tlv *)lldpmib; + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_ETS_TLV_LEN); + tlv->typelen = htons(typelen); + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + buf = tlv->tlvinfo; + buf[0] = 0; + + /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. + * Octets 5 - 12 are BW values, set octet 5 to 100% BW. + * Octets 13 - 20 are TSA values - leave as zeros + */ + buf[5] = 0x64; + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + offset += len + 2; + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + + /* Add ETS REC TLV */ + buf = tlv->tlvinfo; + tlv->typelen = htons(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = htonl(ouisubtype); + + /* First octet of buf is reserved + * Octets 1 - 4 map UP to TC - all UPs map to zero + * Octets 5 - 12 are BW values - set TC 0 to 100%. + * Octets 13 - 20 are TSA value - leave as zeros + */ + buf[5] = 0x64; + offset += len + 2; + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + + /* Add PFC CFG TLV */ + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + /* Octet 1 left as all zeros - PFC disabled */ + buf[0] = 0x08; + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + offset += len + 2; + + if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) + dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__); + + kfree(lldpmib); +} + +/** * ice_link_event - process the link event * @pf: PF that the link event is associated with * @pi: port_info for the port that the link event is associated with @@ -800,6 +897,12 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", pi->lport); + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now.
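+ * This covers, for example, a link that flapped: firmware posted the + * event while the link was down, but it came back up before the + * refreshed link info was read.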
+ */ + if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) + link_up = true; + vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) return -EINVAL; @@ -821,7 +924,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return result; - ice_dcb_rebuild(pf); + if (ice_is_dcb_active(pf)) { + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) + ice_dcb_rebuild(pf); + } else { + if (link_up) + ice_set_dflt_mib(pf); + } ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); @@ -914,6 +1023,151 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) return status; } +enum ice_aq_task_state { + ICE_AQ_TASK_WAITING = 0, + ICE_AQ_TASK_COMPLETE, + ICE_AQ_TASK_CANCELED, +}; + +struct ice_aq_task { + struct hlist_node entry; + + u16 opcode; + struct ice_rq_event_info *event; + enum ice_aq_task_state state; +}; + +/** + * ice_aq_wait_for_event - Wait for an AdminQ event from firmware + * @pf: pointer to the PF private structure + * @opcode: the opcode to wait for + * @timeout: how long to wait, in jiffies + * @event: storage for the event info + * + * Waits for a specific AdminQ completion event on the ARQ for a given PF. The + * current thread will be put to sleep until the specified event occurs or + * until the given timeout is reached. + * + * To obtain only the descriptor contents, pass an event without an allocated + * msg_buf. If the complete data buffer is desired, allocate the + * event->msg_buf with enough space ahead of time. + * + * Returns: zero on success, or a negative error code on failure. + */ +int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, + struct ice_rq_event_info *event) +{ + struct ice_aq_task *task; + long ret; + int err; + + task = kzalloc(sizeof(*task), GFP_KERNEL); + if (!task) + return -ENOMEM; + + INIT_HLIST_NODE(&task->entry); + task->opcode = opcode; + task->event = event; + task->state = ICE_AQ_TASK_WAITING; + + spin_lock_bh(&pf->aq_wait_lock); + hlist_add_head(&task->entry, &pf->aq_wait_list); + spin_unlock_bh(&pf->aq_wait_lock); + + ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, + timeout); + switch (task->state) { + case ICE_AQ_TASK_WAITING: + err = ret < 0 ? ret : -ETIMEDOUT; + break; + case ICE_AQ_TASK_CANCELED: + err = ret < 0 ? ret : -ECANCELED; + break; + case ICE_AQ_TASK_COMPLETE: + err = ret < 0 ? ret : 0; + break; + default: + WARN(1, "Unexpected AdminQ wait task state %u", task->state); + err = -EINVAL; + break; + } + + spin_lock_bh(&pf->aq_wait_lock); + hlist_del(&task->entry); + spin_unlock_bh(&pf->aq_wait_lock); + kfree(task); + + return err; +} + +/** + * ice_aq_check_events - Check if any thread is waiting for an AdminQ event + * @pf: pointer to the PF private structure + * @opcode: the opcode of the event + * @event: the event to check + * + * Loops over the current list of pending threads waiting for an AdminQ event. + * For each matching task, copy the contents of the event into the task + * structure and wake up the thread. + * + * If multiple threads wait for the same opcode, they will all be woken up. + * + * Note that event->msg_buf will only be duplicated if the event has a buffer + * with enough space already allocated. Otherwise, only the descriptor and + * message length will be copied.
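+ * + * For example, a waiter that wants the full response pre-allocates the + * buffer before sleeping (illustrative sketch, assuming the usual + * ICE_AQ_MAX_BUF_LEN sizing): + * + * struct ice_rq_event_info event; + * + * memset(&event, 0, sizeof(event)); + * event.buf_len = ICE_AQ_MAX_BUF_LEN; + * event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + * err = ice_aq_wait_for_event(pf, opcode, HZ, &event);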
+ */ +static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, + struct ice_rq_event_info *event) +{ + struct ice_aq_task *task; + bool found = false; + + spin_lock_bh(&pf->aq_wait_lock); + hlist_for_each_entry(task, &pf->aq_wait_list, entry) { + if (task->state || task->opcode != opcode) + continue; + + memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); + task->event->msg_len = event->msg_len; + + /* Only copy the data buffer if a destination was set */ + if (task->event->msg_buf && + task->event->buf_len > event->buf_len) { + memcpy(task->event->msg_buf, event->msg_buf, + event->buf_len); + task->event->buf_len = event->buf_len; + } + + task->state = ICE_AQ_TASK_COMPLETE; + found = true; + } + spin_unlock_bh(&pf->aq_wait_lock); + + if (found) + wake_up(&pf->aq_wait_queue); +} + +/** + * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks + * @pf: the PF private structure + * + * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. + * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. + */ +static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) +{ + struct ice_aq_task *task; + + spin_lock_bh(&pf->aq_wait_lock); + hlist_for_each_entry(task, &pf->aq_wait_list, entry) + task->state = ICE_AQ_TASK_CANCELED; + spin_unlock_bh(&pf->aq_wait_lock); + + wake_up(&pf->aq_wait_queue); +} + /** * __ice_clean_ctrlq - helper function to clean controlq rings * @pf: ptr to struct ice_pf @@ -1010,6 +1264,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) opcode = le16_to_cpu(event.desc.opcode); + + /* Notify any thread that might be waiting for this event */ + ice_aq_check_events(pf, opcode, &event); + switch (opcode) { case ice_aqc_opc_get_link_status: if (ice_handle_link_event(pf, &event)) @@ -3086,6 +3343,10 @@ static int ice_init_pf(struct ice_pf *pf) mutex_init(&pf->sw_mutex); mutex_init(&pf->tc_mutex); + INIT_HLIST_HEAD(&pf->aq_wait_list); + spin_lock_init(&pf->aq_wait_lock); + init_waitqueue_head(&pf->aq_wait_queue); + /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); pf->serv_tmr_period = HZ; @@ -3323,6 +3584,60 @@ done: } /** + * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode + * @pf: PF to configure + * + * No VLAN offloads/filtering are advertised in safe mode so make sure the PF + * VSI can still Tx/Rx VLAN tagged packets.
+ */ +static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct ice_vsi_ctx *ctxt; + enum ice_status status; + struct ice_hw *hw; + + if (!vsi) + return; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return; + + hw = &pf->hw; + ctxt->info = vsi->info; + + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + /* disable VLAN anti-spoof */ + ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + + /* disable VLAN pruning and keep all other settings */ + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + + /* allow all VLANs on Tx and don't strip on Rx */ + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | + ICE_AQ_VSI_VLAN_EMOD_NOTHING; + + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (status) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + vsi->info.vlan_flags = ctxt->info.vlan_flags; + } + + kfree(ctxt); +} + +/** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info * @status: status of package load @@ -3819,7 +4134,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) { dev_err(dev, "probe failed sending driver version %s. error: %d\n", UTS_RELEASE, err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } /* since everything is good, start the service timer */ @@ -3828,19 +4143,19 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_init_link_events(pf->hw.port_info); if (err) { dev_err(dev, "ice_init_link_events failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } err = ice_init_nvm_phy_type(pf->hw.port_info); if (err) { dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } err = ice_update_link_info(pf->hw.port_info); if (err) { dev_err(dev, "ice_update_link_info failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } ice_init_link_dflt_override(pf->hw.port_info); @@ -3851,7 +4166,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_init_phy_user_cfg(pf->hw.port_info); if (err) { dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { @@ -3878,9 +4193,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) /* Disable WoL at init, wait for user to enable */ device_set_wakeup_enable(dev, false); - /* If no DDP driven features have to be setup, we are done with probe */ - if (ice_is_safe_mode(pf)) + if (ice_is_safe_mode(pf)) { + ice_set_safe_mode_vlan_cfg(pf); goto probe_done; + } /* initialize DDP driven features */ @@ -3904,6 +4220,8 @@ probe_done: clear_bit(__ICE_DOWN, pf->state); return 0; +err_send_version_unroll: + ice_vsi_release_all(pf); err_alloc_sw_unroll: ice_devlink_destroy_port(pf); set_bit(__ICE_SERVICE_DIS, pf->state); @@ -4014,6 +4332,8 @@ static void ice_remove(struct pci_dev *pdev) set_bit(__ICE_DOWN, pf->state); ice_service_task_stop(pf); + ice_aq_cancel_waiting_tasks(pf); + 
mutex_destroy(&(&pf->hw)->fdir_fltr_lock); if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); @@ -4147,7 +4467,7 @@ err_reinit: * Power Management callback to quiesce the device and prepare * for D3 transition. */ -static int ice_suspend(struct device *dev) +static int __maybe_unused ice_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ice_pf *pf; @@ -4211,7 +4531,7 @@ static int ice_suspend(struct device *dev) * ice_resume - PM callback for waking up from D3 * @dev: generic device information structure */ -static int ice_resume(struct device *dev) +static int __maybe_unused ice_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); enum ice_reset_req reset_type; @@ -4360,6 +4680,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev) return; } + ice_restore_all_vfs_msi_state(pdev); + ice_do_reset(pf, ICE_RESET_PFR); ice_service_task_restart(pf); mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@ -4968,6 +5290,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi->tx_linearize = 0; vsi->rx_buf_failed = 0; vsi->rx_page_failed = 0; + vsi->rx_gro_dropped = 0; rcu_read_lock(); @@ -4982,6 +5305,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi_stats->rx_bytes += bytes; vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; + vsi->rx_gro_dropped += ring->rx_stats.gro_dropped; } /* update XDP Tx rings counters */ @@ -5013,7 +5337,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) ice_update_eth_stats(vsi); cur_ns->tx_errors = cur_es->tx_errors; - cur_ns->rx_dropped = cur_es->rx_discards; + cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped; cur_ns->tx_dropped = cur_es->tx_discards; cur_ns->multicast = cur_es->rx_multicast; @@ -5656,10 +5980,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) if (err) goto err_sched_init_port; - err = ice_update_link_info(hw->port_info); - if (err) - dev_err(dev, "Get link status error %d\n", err); - /* start misc vector */ err = ice_req_irq_msix_misc(pf); if (err) { diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 7c2a06892bbb..5903a36763de 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -108,6 +108,76 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, } /** + * ice_aq_update_nvm + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be written (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @command_flags: command parameters + * @cd: pointer to command details structure or NULL + * + * Update the NVM using the admin queue commands (0x0703) + */ +enum ice_status +ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, u8 command_flags, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + struct ice_aqc_nvm *cmd; + + cmd = &desc.params.nvm; + + /* The highest byte of the offset must be zero. */ + if (offset & 0xFF000000) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); + + cmd->cmd_flags |= command_flags; + + /* If this is the last command in a series, set the proper flag.
*/ + if (last_command) + cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->offset_low = cpu_to_le16(offset & 0xFFFF); + cmd->offset_high = (offset >> 16) & 0xFF; + cmd->length = cpu_to_le16(length); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, data, length, cd); +} + +/** + * ice_aq_erase_nvm + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * @cd: pointer to command details structure or NULL + * + * Erase the NVM sector using the admin queue commands (0x0702) + */ +enum ice_status +ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + struct ice_aqc_nvm *cmd; + + cmd = &desc.params.nvm; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase); + + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN); + cmd->offset_low = 0; + cmd->offset_high = 0; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** * ice_read_sr_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) @@ -634,3 +704,119 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) return status; } + +/** + * ice_nvm_write_activate + * @hw: pointer to the HW struct + * @cmd_flags: NVM activate admin command bits (banks to be validated) + * + * Update the control word with the required banks' validity bits + * and dump the Shadow RAM to flash (0x0707) + */ +enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) +{ + struct ice_aqc_nvm *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.nvm; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); + + cmd->cmd_flags = cmd_flags; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_nvm_update_empr + * @hw: pointer to the HW struct + * + * Update EMPR (0x0709). This command allows SW to + * request an EMPR to activate new FW. + */ +enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_nvm_set_pkg_data + * @hw: pointer to the HW struct + * @del_pkg_data_flag: if set, the package data currently stored by FW + * is deleted; in that case the buffer must have length 0. + * @data: pointer to buffer + * @length: length of the buffer + * @cd: pointer to command details structure or NULL + * + * Set package data (0x070A). This command is equivalent to the reception + * of a PLDM FW Update GetPackageData cmd. This command should be sent + * as part of the NVM update as the first cmd in the flow.
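+ * + * Taken together with the other NVM admin commands in this file, a + * complete update issues roughly this sequence: 0x070A Set Package Data, + * 0x070B Pass Component Table (per component), 0x0702 Erase, 0x0703 + * Write, and finally 0x0707 Write Activate.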
+ */ +enum ice_status +ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, + u16 length, struct ice_sq_cd *cd) +{ + struct ice_aqc_nvm_pkg_data *cmd; + struct ice_aq_desc desc; + + if (length != 0 && !data) + return ICE_ERR_PARAM; + + cmd = &desc.params.pkg_data; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (del_pkg_data_flag) + cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE; + + return ice_aq_send_cmd(hw, &desc, data, length, cd); +} + +/** + * ice_nvm_pass_component_tbl + * @hw: pointer to the HW struct + * @data: pointer to buffer + * @length: length of the buffer + * @transfer_flag: parameter for determining stage of the update + * @comp_response: a pointer to the response from the 0x070B AQC. + * @comp_response_code: a pointer to the response code from the 0x070B AQC. + * @cd: pointer to command details structure or NULL + * + * Pass component table (0x070B). This command is equivalent to the reception + * of a PLDM FW Update PassComponentTable cmd. This command should be sent once + * per component. It can only be sent after the Set Package Data cmd and before + * the actual update. FW will assume these commands are going to be sent until + * the TransferFlag is set to End or StartAndEnd. + */ +enum ice_status +ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, + u8 transfer_flag, u8 *comp_response, + u8 *comp_response_code, struct ice_sq_cd *cd) +{ + struct ice_aqc_nvm_pass_comp_tbl *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!data || !comp_response || !comp_response_code) + return ICE_ERR_PARAM; + + cmd = &desc.params.pass_comp_tbl; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_nvm_pass_component_tbl); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->transfer_flag = transfer_flag; + status = ice_aq_send_cmd(hw, &desc, data, length, cd); + + if (!status) { + *comp_response = cmd->component_response; + *comp_response_code = cmd->component_response_code; + } + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index 999f273ba6ad..8d430909f846 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -17,4 +17,20 @@ enum ice_status ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); +enum ice_status +ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, u8 command_flags, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); +enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); +enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); +enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw); +enum ice_status +ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, + u16 length, struct ice_sq_cd *cd); +enum ice_status +ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, + u8 transfer_flag, u8 *comp_response, + u8 *comp_response_code, struct ice_sq_cd *cd); #endif /* _ICE_NVM_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 1c29cfa1cf33..44a228530253 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ 
-170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, return ICE_ERR_PARAM; } - /* query the current node information from FW before additing it + /* query the current node information from FW before adding it * to the SW DB */ status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); @@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) /** * ice_aq_rl_profile - performs a rate limiting task * @hw: pointer to the HW struct - * @opcode:opcode for add, query, or remove profile(s) + * @opcode: opcode for add, query, or remove profile(s) * @num_profiles: the number of profiles * @buf: pointer to buffer * @buf_size: buffer size in bytes @@ -1276,6 +1276,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, } /** + * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node + * @pi: port information structure + * @vsi_node: software VSI scheduler node + * @qgrp_node: first queue group node identified for scanning + * @owner: LAN or RDMA + * + * This function retrieves a free LAN or RDMA queue group node by scanning + * qgrp_node and its siblings for the queue group with the fewest number + * of queues currently assigned. + */ +static struct ice_sched_node * +ice_sched_get_free_qgrp(struct ice_port_info *pi, + struct ice_sched_node *vsi_node, + struct ice_sched_node *qgrp_node, u8 owner) +{ + struct ice_sched_node *min_qgrp; + u8 min_children; + + if (!qgrp_node) + return qgrp_node; + min_children = qgrp_node->num_children; + if (!min_children) + return qgrp_node; + min_qgrp = qgrp_node; + /* scan all queue groups until we find a node that has fewer than the + * current minimum number of children. This way all queue group nodes + * get an equal number of shares, and the bandwidth is distributed + * equally across all active queues.
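+ * + * For example (hypothetical counts): with sibling groups currently + * holding 4, 2 and 0 queues, the scan settles on the empty group and + * stops early via the !min_children check below.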
+ */ + while (qgrp_node) { + /* make sure the qgroup node is part of the VSI subtree */ + if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) + if (qgrp_node->num_children < min_children && + qgrp_node->owner == owner) { + /* record the new min queue group node */ + min_qgrp = qgrp_node; + min_children = min_qgrp->num_children; + /* break early if it has no children */ + if (!min_children) + break; + } + qgrp_node = qgrp_node->sibling; + } + return min_qgrp; +} + +/** * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node * @pi: port information structure * @vsi_handle: software VSI handle @@ -1288,7 +1335,7 @@ struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner) { - struct ice_sched_node *vsi_node, *qgrp_node = NULL; + struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; u16 max_children; u8 qgrp_layer; @@ -1302,7 +1349,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, vsi_node = vsi_ctx->sched.vsi_node[tc]; /* validate invalid VSI ID */ if (!vsi_node) - goto lan_q_exit; + return NULL; /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); @@ -1315,8 +1362,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, qgrp_node = qgrp_node->sibling; } -lan_q_exit: - return qgrp_node; + /* Select the best queue group */ + return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); } /** @@ -2153,8 +2200,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, hw = pi->hw; list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) - if (rl_prof_elem->profile.flags == profile_type && - rl_prof_elem->bw == bw) + if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == + profile_type && rl_prof_elem->bw == bw) /* Return existing profile ID info */ return rl_prof_elem; @@ -2384,7 +2431,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) - if (rl_prof_elem->profile.flags == profile_type && + if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == + profile_type && le16_to_cpu(rl_prof_elem->profile.profile_id) == profile_id) { if (rl_prof_elem->prof_id_ref)
ice_aqc_opc_add_sw_rules && + hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) + status = ICE_ERR_DOES_NOT_EXIST; + + return status; } /* ice_init_port_info - Initialize port_info with switch configuration data diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index abdb137c8bb7..9d0d6b0025cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -509,8 +509,8 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring) return 0; } -static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring, - unsigned int size) +static unsigned int +ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size) { unsigned int truesize; @@ -631,10 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) { - rx_ring->rx_stats.page_reuse_count++; + if (likely(page)) return true; - } /* alloc new page for storage */ page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); @@ -1033,7 +1031,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) if (ice_can_reuse_rx_page(rx_buf)) { /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); - rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, @@ -1254,12 +1251,12 @@ construct_skb: * @itr: ITR value to update * * Calculate how big of an increment should be applied to the ITR value passed - * in based on wmem_default, SKB overhead, Ethernet overhead, and the current + * in based on wmem_default, SKB overhead, ethernet overhead, and the current * link speed. * * The following is a calculation derived from: * wmem_default / (size + overhead) = desired_pkts_per_int - * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * * Assuming wmem_default is 212992 and overhead is 640 bytes per @@ -2294,10 +2291,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. 
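+ * + * Hypothetical numbers: a 20000 byte stale fragment at page offset 100 + * first sheds align_pad = 3996 bytes, then one 12K aligned chunk + * (ICE_MAX_DATA_PER_TXD_ALIGNED), leaving 3716 bytes to be subtracted + * as the fragment's last descriptor at the bottom of the loop.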
+ */ + if (stale_size > ICE_MAX_DATA_PER_TXD) { + int align_pad = -(skb_frag_off(stale)) & + (ICE_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > ICE_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -2305,7 +2322,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index e70c4619edc3..51b4df7a59d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -193,7 +193,7 @@ struct ice_rxq_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buf_failed; - u64 page_reuse_count; + u64 gro_dropped; /* GRO returned dropped */ }; /* this enum matches hardware bits and is meant to be used by DYN_CTLN diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 02b12736ea80..bc2f4390b51d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - napi_gro_receive(&rx_ring->q_vector->napi, skb); + if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) { + /* this is tracked separately to help us debug stack drops */ + rx_ring->rx_stats.gro_dropped++; + netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n", + rx_ring->q_index); + } } /** diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 08c616d9fffd..4cdccfadf274 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -244,6 +244,15 @@ struct ice_hw_common_caps { u8 rss_table_entry_width; /* RSS Entry width in bits */ u8 dcb; + + bool nvm_update_pending_nvm; + bool nvm_update_pending_orom; + bool nvm_update_pending_netlist; +#define ICE_NVM_PENDING_NVM_IMAGE BIT(0) +#define ICE_NVM_PENDING_OROM BIT(1) +#define ICE_NVM_PENDING_NETLIST BIT(2) + bool nvm_unified_update; +#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) }; /* Function specific capabilities */ @@ -312,7 +321,7 @@ struct ice_nvm_info { u32 flash_size; /* Size of available flash in bytes */ u8 major_ver; /* major version of NVM package */ u8 minor_ver; /* minor version of dev starter */ - u8 blank_nvm_mode; /* is NVM empty (no FW present) */ + u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; struct ice_link_default_override_tlv { @@ -400,7 +409,7 @@ enum ice_rl_type { #define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */ #define ICE_SCHED_DFLT_RL_PROF_ID 0 #define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF -#define ICE_SCHED_DFLT_BW_WT 1 +#define ICE_SCHED_DFLT_BW_WT 4 #define ICE_SCHED_INVAL_PROF_ID 0xFFFF #define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */ @@ -771,6 +780,9 @@ struct ice_hw_port_stats { #define ICE_OROM_VER_SHIFT 24 #define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 +#define ICE_SR_1ST_NVM_BANK_PTR 0x42 +#define ICE_SR_1ST_OROM_BANK_PTR 0x44 +#define ICE_SR_NETLIST_BANK_PTR 0x46 #define 
ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 /* Link override related */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 9df5ceb26ab9..71497776ac62 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf) num_msix_per_vf = ICE_NUM_VF_MSIX_MED; } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { + num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { num_msix_per_vf = ICE_MIN_INTR_PER_VF; } else { @@ -2972,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->max_frame = qpi->rxq.max_pkt_size; } - /* VF can request to configure less than allocated queues - * or default allocated queues. So update the VSI with new number + /* VF can request to configure less than allocated queues or default + * allocated queues. So update the VSI with new number */ vsi->num_txq = num_txq; vsi->num_rxq = num_rxq; @@ -4071,3 +4073,33 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) } } } + +/** + * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR + * @pdev: pointer to a pci_dev structure + * + * Called when recovering from a PF FLR to restore interrupt capability to + * the VFs. + */ +void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) +{ + struct pci_dev *vfdev; + u16 vf_id; + int pos; + + if (!pci_num_vf(pdev)) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, + &vf_id); + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == pdev) + pci_restore_msi_state(vfdev); + vfdev = pci_get_device(pdev->vendor, vf_id, + vfdev); + } + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 67aa9110fdd1..0f519fba3770 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -32,6 +32,7 @@ #define ICE_MAX_RSS_QS_PER_VF 16 #define ICE_NUM_VF_MSIX_MED 17 #define ICE_NUM_VF_MSIX_SMALL 5 +#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) #define ICE_MAX_VF_RESET_TRIES 40 #define ICE_MAX_VF_RESET_SLEEP_MS 20 @@ -114,6 +115,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); +void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, @@ -146,6 +148,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf); #define ice_vf_lan_overflow_event(pf, event) do {} while (0) #define ice_print_vfs_mdd_events(pf) do {} while (0) #define ice_print_vf_rx_mdd_event(vf) do {} while (0) +#define ice_restore_all_vfs_msi_state(pdev) do {} while (0) static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 6badfd62dc63..20ac5fca68c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, 
u16 qid) } } - /** * ice_xsk_umem_disable - disable a UMEM region * @vsi: Current VSI @@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) if (!size) break; - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; rx_buf->xdp->data_end = rx_buf->xdp->data + size; xsk_buff_dma_sync_for_cpu(rx_buf->xdp); @@ -706,8 +704,6 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) if (tx_desc) { ice_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) - xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return budget > 0 && work_done; @@ -783,12 +779,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) if (xsk_frames) xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) { - if (xdp_ring->next_to_clean == xdp_ring->next_to_use) - xsk_set_tx_need_wakeup(xdp_ring->xsk_umem); - else - xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); - } + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_set_tx_need_wakeup(xdp_ring->xsk_umem); ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index c2cf414d126b..6e8231c1ddf0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1782,8 +1782,8 @@ static void igb_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size /= 2; memset(&skb->data[frame_size], 0xAA, frame_size - 1); - memset(&skb->data[frame_size + 10], 0xBE, 1); - memset(&skb->data[frame_size + 12], 0xAF, 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; } static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ae8d64324619..4f05f6efe6af 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6215,9 +6215,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -7159,7 +7168,7 @@ static void igb_flush_mac_table(struct igb_adapter *adapter) for (i = 0; i < hw->mac.rar_entry_count; i++) { adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; igb_rar_set_index(adapter, i); } @@ -7308,7 +7317,7 @@ static int igb_del_mac_filter_flags(struct igb_adapter *adapter, } else { adapter->mac_table[i].state = 0; adapter->mac_table[i].queue = 0; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); } igb_rar_set_index(adapter, i); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 97a065928976..19269f5d52bc 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2457,13 +2457,10 @@ static int igbvf_ioctl(struct 
net_device *netdev, struct ifreq *ifr, int cmd) } } -static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) +static int igbvf_suspend(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct igbvf_adapter *adapter = netdev_priv(netdev); -#ifdef CONFIG_PM - int retval = 0; -#endif netif_device_detach(netdev); @@ -2473,31 +2470,16 @@ static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) igbvf_free_irq(adapter); } -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; -#endif - - pci_disable_device(pdev); - return 0; } -#ifdef CONFIG_PM -static int igbvf_resume(struct pci_dev *pdev) +static int __maybe_unused igbvf_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); u32 err; - pci_restore_state(pdev); - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); if (netif_running(netdev)) { @@ -2515,11 +2497,10 @@ static int igbvf_resume(struct pci_dev *pdev) return 0; } -#endif static void igbvf_shutdown(struct pci_dev *pdev) { - igbvf_suspend(pdev, PMSG_SUSPEND); + igbvf_suspend(&pdev->dev); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2960,17 +2941,15 @@ static const struct pci_device_id igbvf_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); +static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume); + /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { .name = igbvf_driver_name, .id_table = igbvf_pci_tbl, .probe = igbvf_probe, .remove = igbvf_remove, -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = igbvf_suspend, - .resume = igbvf_resume, -#endif + .driver.pm = &igbvf_pm_ops, .shutdown = igbvf_shutdown, .err_handler = &igbvf_err_handler }; diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 2ab7d9fab6af..b9fe51b91c47 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -82,13 +82,7 @@ struct igc_mac_info { enum igc_mac_type type; - u32 collision_delta; - u32 ledctl_default; - u32 ledctl_mode1; - u32 ledctl_mode2; u32 mc_filter_type; - u32 tx_packet_delta; - u32 txcw; u16 mta_reg_count; u16 uta_reg_count; @@ -98,8 +92,6 @@ struct igc_mac_info { u8 forced_speed_duplex; - bool adaptive_ifs; - bool has_fwsm; bool asf_firmware_present; bool arc_subsystem_valid; @@ -276,21 +268,9 @@ struct igc_hw_stats { u64 tsctc; u64 tsctfc; u64 iac; - u64 icrxptc; - u64 icrxatc; - u64 ictxptc; - u64 ictxatc; - u64 ictxqec; - u64 ictxqmtc; - u64 icrxdmtc; - u64 icrxoc; - u64 cbtmpc; u64 htdpmc; - u64 cbrdpc; - u64 cbrmpc; u64 rpthc; u64 hgptc; - u64 htcbdpc; u64 hgorc; u64 hgotc; u64 lenerrs; diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index b47e7b0a6398..09cd0ec7ee87 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -295,20 +295,12 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_MGTPTC); rd32(IGC_IAC); - rd32(IGC_ICRXOC); - - rd32(IGC_ICRXPTC); - rd32(IGC_ICRXATC); - rd32(IGC_ICTXPTC); - rd32(IGC_ICTXATC); - rd32(IGC_ICTXQEC); - rd32(IGC_ICTXQMTC); - rd32(IGC_ICRXDMTC); rd32(IGC_RPTHC); rd32(IGC_TLPIC); rd32(IGC_RLPIC); rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); rd32(IGC_HGORCL); rd32(IGC_HGORCH); 
rd32(IGC_HGOTCL); @@ -363,8 +355,8 @@ void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) s32 igc_check_for_copper_link(struct igc_hw *hw) { struct igc_mac_info *mac = &hw->mac; + bool link = false; s32 ret_val; - bool link; /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 8d5869dcf798..7a6f2a0d413f 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3730,14 +3730,6 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.tsctc += rd32(IGC_TSCTC); adapter->stats.iac += rd32(IGC_IAC); - adapter->stats.icrxoc += rd32(IGC_ICRXOC); - adapter->stats.icrxptc += rd32(IGC_ICRXPTC); - adapter->stats.icrxatc += rd32(IGC_ICRXATC); - adapter->stats.ictxptc += rd32(IGC_ICTXPTC); - adapter->stats.ictxatc += rd32(IGC_ICTXATC); - adapter->stats.ictxqec += rd32(IGC_ICTXQEC); - adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); - adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); /* Fill out the OS statistics structure */ net_stats->multicast = adapter->stats.mprc; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 1c46cec5a799..b52dd9d737e8 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -58,16 +58,6 @@ #define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ #define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ -/* Interrupt Cause */ -#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */ -#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */ -#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */ -#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */ -#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */ -#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */ -#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */ -#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */ - /* MSI-X Table Register Descriptions */ #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ @@ -182,10 +172,6 @@ #define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ #define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ #define IGC_IAC 0x04100 /* Interrupt Assertion Count */ -#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ -#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ -#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ -#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ #define IGC_RPTHC 0x04104 /* Rx Packets To Host */ #define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ #define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 6725d892336e..71ec908266a6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1951,8 +1951,8 @@ static void ixgbe_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size >>= 1; memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size + 10], 0xBE, 1); - memset(&skb->data[frame_size + 12], 0xAF, 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; } static bool 
ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6f32b1706ab9..2f8a4cfc5fa1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6877,32 +6877,20 @@ int ixgbe_close(struct net_device *netdev) return 0; } -#ifdef CONFIG_PM -static int ixgbe_resume(struct pci_dev *pdev) +static int __maybe_unused ixgbe_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; adapter->hw.hw_addr = adapter->io_addr; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* - * pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. - */ - pci_save_state(pdev); - err = pci_enable_device_mem(pdev); - if (err) { - e_dev_err("Cannot enable PCI device from suspend\n"); - return err; - } smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev); - pci_wake_from_d3(pdev, false); + device_wakeup_disable(dev_d); ixgbe_reset(adapter); @@ -6920,7 +6908,6 @@ static int ixgbe_resume(struct pci_dev *pdev) return err; } -#endif /* CONFIG_PM */ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { @@ -6929,9 +6916,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) struct ixgbe_hw *hw = &adapter->hw; u32 ctrl; u32 wufc = adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif rtnl_lock(); netif_device_detach(netdev); @@ -6942,12 +6926,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) ixgbe_clear_interrupt_scheme(adapter); rtnl_unlock(); -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; - -#endif if (hw->mac.ops.stop_link_on_d3) hw->mac.ops.stop_link_on_d3(hw); @@ -7002,26 +6980,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return 0; } -#ifdef CONFIG_PM -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused ixgbe_suspend(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); int retval; bool wake; retval = __ixgbe_shutdown(pdev, &wake); - if (retval) - return retval; - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } + device_set_wakeup_enable(dev_d, wake); - return 0; + return retval; } -#endif /* CONFIG_PM */ static void ixgbe_shutdown(struct pci_dev *pdev) { @@ -11379,16 +11349,15 @@ static const struct pci_error_handlers ixgbe_err_handler = { .resume = ixgbe_io_resume, }; +static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume); + static struct pci_driver ixgbe_driver = { - .name = ixgbe_driver_name, - .id_table = ixgbe_pci_tbl, - .probe = ixgbe_probe, - .remove = ixgbe_remove, -#ifdef CONFIG_PM - .suspend = ixgbe_suspend, - .resume = ixgbe_resume, -#endif - .shutdown = ixgbe_shutdown, + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = ixgbe_remove, + .driver.pm = &ixgbe_pm_ops, + .shutdown = ixgbe_shutdown, .sriov_configure = ixgbe_pci_sriov_configure, .err_handler = &ixgbe_err_handler }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 23a92656821d..988db46bff0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -783,7 +783,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); else - memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses); return retval; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a6267569bfa9..a428113e6d54 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4297,13 +4297,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused ixgbevf_suspend(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct ixgbevf_adapter *adapter = netdev_priv(netdev); -#ifdef CONFIG_PM - int retval = 0; -#endif rtnl_lock(); netif_device_detach(netdev); @@ -4314,37 +4311,16 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) ixgbevf_clear_interrupt_scheme(adapter); rtnl_unlock(); -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; - -#endif - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) - pci_disable_device(pdev); - return 0; } -#ifdef CONFIG_PM -static int ixgbevf_resume(struct pci_dev *pdev) +static int __maybe_unused ixgbevf_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); u32 err; - pci_restore_state(pdev); - /* pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. - */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); - return err; - } - adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); @@ -4365,10 +4341,9 @@ static int ixgbevf_resume(struct pci_dev *pdev) return err; } -#endif /* CONFIG_PM */ static void ixgbevf_shutdown(struct pci_dev *pdev) { - ixgbevf_suspend(pdev, PMSG_SUSPEND); + ixgbevf_suspend(&pdev->dev); } static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, @@ -4882,16 +4857,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = { .resume = ixgbevf_io_resume, }; +static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume); + /* PCI Device API Driver */ static struct pci_driver ixgbevf_driver = { .name = ixgbevf_driver_name, .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = ixgbevf_remove, -#ifdef CONFIG_PM + /* Power Management Hooks */ - .suspend = ixgbevf_suspend, - .resume = ixgbevf_resume, -#endif + .driver.pm = &ixgbevf_pm_ops, + .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler };
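
A note on the recurring pattern above: the e100, igbvf, ixgbe and ixgbevf hunks all perform the same conversion from the legacy PCI power-management hooks (.suspend/.resume taking a pci_dev, guarded by #ifdef CONFIG_PM, with manual pci_save_state()/pci_restore_state()/pci_set_power_state() calls) to the generic framework, where a dev_pm_ops table built with SIMPLE_DEV_PM_OPS is attached through .driver.pm and the PCI core handles config-space save/restore and power-state transitions itself. What follows is a minimal sketch of the resulting driver shape, not code from any of these patches; the foo_* names are hypothetical.

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

/* Runs at system suspend; the PCI core saves config space and picks
 * the low-power state afterwards, so no pci_save_state() here.
 */
static int __maybe_unused foo_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);

	netif_device_detach(netdev);	/* quiesce the interface */
	return 0;
}

/* Runs at resume; config space has already been restored by the core,
 * so the driver only re-enables bus mastering and brings the
 * interface back.
 */
static int __maybe_unused foo_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);

	pci_set_master(pdev);
	netif_device_attach(netdev);
	return 0;
}

/* Expands to a const struct dev_pm_ops that wires the system-sleep
 * callbacks (suspend/resume and the hibernation freeze/thaw/
 * poweroff/restore hooks) to the two functions above; with
 * CONFIG_PM_SLEEP disabled it expands to an empty table.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* .id_table, .probe, .remove omitted in this sketch */
	.driver.pm	= &foo_pm_ops,	/* replaces legacy .suspend/.resume */
};

Because the table is empty when CONFIG_PM_SLEEP is off, the callbacks would otherwise trigger unused-function warnings; annotating them __maybe_unused replaces the preprocessor guards, which is exactly the substitution these patches make (igbvf_suspend stays unannotated only because igbvf_shutdown still calls it directly).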