Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile          |    1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h             |   23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h  |  214
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c        |    2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c      | 1432
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h      |   36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c    |    6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c         |   37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.h         |    4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c     |    6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h     |   11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c     |  121
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c     |  745
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c   |  135
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h   |   39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c        |   13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c   |  773
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.h   |   12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h  |   13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h   |  314
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c         |   43
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h         |    2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c        | 1137
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c         |  191
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h         |   20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c       |  135
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h       |    2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c      |   58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c        |   39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h        |    2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c    |    7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h        |   73
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c |   61
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h |    3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c         |   18
35 files changed, 4693 insertions, 1035 deletions
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 980bbcc64b4b..6da4f43f2348 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -23,6 +23,7 @@ ice-y := ice_main.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
+ ice_fw_update.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 5792ee616b5c..fe140ff38f74 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
@@ -55,7 +56,6 @@
#include "ice_xsk.h"
#include "ice_arfs.h"
-extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
@@ -223,6 +223,8 @@ enum ice_state {
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
+ __ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+ __ICE_PHY_INIT_COMPLETE,
__ICE_STATE_NBITS /* must be last */
};
@@ -254,6 +256,7 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
+ u32 rx_gro_dropped;
u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
@@ -358,12 +361,14 @@ enum ice_pf_flags {
ICE_FLAG_FD_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
+ ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -374,6 +379,7 @@ struct ice_pf {
struct devlink_port devlink_port;
struct devlink_region *nvm_region;
+ struct devlink_region *devcaps_region;
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
@@ -408,6 +414,12 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
+
+ /* spinlock to protect the AdminQ wait list */
+ spinlock_t aq_wait_lock;
+ struct hlist_head aq_wait_list;
+ wait_queue_head_t aq_wait_queue;
+
u32 hw_csum_rx_error;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
@@ -423,6 +435,8 @@ struct ice_pf {
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u8 wol_ena : 1; /* software state of WoL */
+ u32 wakeup_reason; /* last wakeup reason */
struct ice_hw_port_stats stats;
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
@@ -435,6 +449,10 @@ struct ice_pf {
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
u32 sw_int_count;
+
+ __le64 nvm_phy_type_lo; /* NVM PHY type low */
+ __le64 nvm_phy_type_hi; /* NVM PHY type high */
+ struct ice_link_default_override_tlv link_dflt_override;
};
struct ice_netdev_priv {
@@ -568,6 +586,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
+bool ice_is_wol_supported(struct ice_pf *pf);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
bool is_tun);
@@ -582,6 +601,8 @@ void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ struct ice_rq_event_info *event);
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 92f82f2a8af4..ba9375218fef 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -109,6 +109,13 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_FD 0x0045
#define ICE_AQC_CAPS_MAX_MTU 0x0047
+#define ICE_AQC_CAPS_NVM_VER 0x0048
+#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
+#define ICE_AQC_CAPS_OROM_VER 0x004A
+#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
+#define ICE_AQC_CAPS_NET_VER 0x004C
+#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
u8 minor_ver;
@@ -215,13 +222,6 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
-/* The response buffer is as follows. Note that the length of the
- * elements array varies with the length of the command response.
- */
-struct ice_aqc_get_sw_cfg_resp {
- struct ice_aqc_get_sw_cfg_resp_elem elements[1];
-};
-
/* These resource type defines are used for all switch resource
* commands where a resource type is required, such as:
* Get Resource Allocation command (indirect 0x0204)
@@ -274,7 +274,7 @@ struct ice_aqc_alloc_free_res_elem {
#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
(0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
__le16 num_elems;
- struct ice_aqc_res_elem elem[1];
+ struct ice_aqc_res_elem elem[];
};
/* Add VSI (indirect 0x0210)
@@ -568,8 +568,8 @@ struct ice_sw_rule_lkup_rx_tx {
* lookup-type
*/
__le16 hdr_len;
- u8 hdr[1];
-} __packed;
+ u8 hdr[];
+};
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
@@ -578,7 +578,6 @@ struct ice_sw_rule_lkup_rx_tx {
struct ice_sw_rule_lg_act {
__le16 index; /* Index in large action table */
__le16 size;
- __le32 act[1]; /* array of size for actions */
/* Max number of large actions */
#define ICE_MAX_LG_ACT 4
/* Bit 0:1 - Action type */
@@ -629,6 +628,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT 0x7
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
+ __le32 act[]; /* array of size for actions */
};
/* Add/Update/Remove VSI list command/response entry
@@ -638,7 +638,7 @@ struct ice_sw_rule_lg_act {
struct ice_sw_rule_vsi_list {
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
- __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+ __le16 vsi[]; /* Array of number_vsi VSI numbers */
};
/* Query VSI list command/response entry */
@@ -695,14 +695,6 @@ struct ice_aqc_sched_elem_cmd {
__le32 addr_low;
};
-/* This is the buffer for:
- * Suspend Nodes (indirect 0x0409)
- * Resume Nodes (indirect 0x040A)
- */
-struct ice_aqc_suspend_resume_elem {
- __le32 teid[1];
-};
-
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -753,15 +745,7 @@ struct ice_aqc_txsched_topo_grp_info_hdr {
struct ice_aqc_add_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_conf_elem {
- struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_get_elem {
- struct ice_aqc_txsched_elem_data generic[1];
+ struct ice_aqc_txsched_elem_data generic[];
};
struct ice_aqc_get_topo_elem {
@@ -772,7 +756,7 @@ struct ice_aqc_get_topo_elem {
struct ice_aqc_delete_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- __le32 teid[1];
+ __le32 teid[];
};
/* Query Port ETS (indirect 0x040E)
@@ -835,10 +819,6 @@ struct ice_aqc_rl_profile_elem {
__le16 rl_encode;
};
-struct ice_aqc_rl_profile_generic_elem {
- struct ice_aqc_rl_profile_elem generic[1];
-};
-
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -988,8 +968,11 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5)
#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7)
#define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0)
- u8 low_power_ctrl;
+ u8 low_power_ctrl_an;
#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1)
+#define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2)
+#define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3)
__le16 eee_cap;
#define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0)
#define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1)
@@ -1010,12 +993,14 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
#define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0)
- u8 rsvd1; /* Byte 35 reserved */
+ u8 module_compliance_enforcement;
+#define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0)
u8 extended_compliance_code;
#define ICE_MODULE_TYPE_TOTAL_BYTE 3
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define ICE_AQC_MOD_TYPE_IDENT 1
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
@@ -1059,11 +1044,11 @@ struct ice_aqc_set_phy_cfg_data {
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
#define ICE_AQ_PHY_ENA_LESM BIT(6)
#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7)
- u8 low_power_ctrl;
+ u8 low_power_ctrl_an;
__le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
__le16 eeer_value;
u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */
- u8 rsvd1;
+ u8 module_compliance_enforcement;
};
/* Set MAC Config command data structure (direct 0x0603) */
@@ -1174,6 +1159,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
__le16 link_speed;
+#define ICE_AQ_LINK_SPEED_M 0x7FF
#define ICE_AQ_LINK_SPEED_10MB BIT(0)
#define ICE_AQ_LINK_SPEED_100MB BIT(1)
#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
@@ -1216,6 +1202,57 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
+struct ice_aqc_link_topo_addr {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S)
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
+ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
+#define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2
+#define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
+ u8 index;
+ __le16 handle;
+#define ICE_AQC_LINK_TOPO_HANDLE_S 0
+#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ice_aqc_get_link_topo {
+ struct ice_aqc_link_topo_addr addr;
+ u8 node_part_num;
+ u8 rsvd[9];
+};
+
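+
The node_type_ctx byte above packs two 4-bit fields: the node type in bits 0-3 and the context in bits 4-7. A self-contained userspace sketch of composing and decoding that byte with the same shift/mask idiom (the local defines mirror, but are not, the kernel ones):

/* Stand-alone sketch of packing/unpacking node_type_ctx. */
#include <stdint.h>
#include <stdio.h>

#define NODE_TYPE_S 0
#define NODE_TYPE_M (0xF << NODE_TYPE_S)
#define NODE_CTX_S  4
#define NODE_CTX_M  (0xF << NODE_CTX_S)

int main(void)
{
	/* cage node (6) in port context (2), per the defines above */
	uint8_t node_type_ctx = (2 << NODE_CTX_S) | (6 & NODE_TYPE_M);

	printf("type=%u ctx=%u\n",
	       (node_type_ctx & NODE_TYPE_M) >> NODE_TYPE_S,
	       (node_type_ctx & NODE_CTX_M) >> NODE_CTX_S);
	return 0;
}
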
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1268,7 +1305,14 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
+#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4)
+#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6)
+#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
@@ -1317,6 +1361,67 @@ struct ice_aqc_nvm_checksum {
#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+/* Used for NVM Set Package Data command - 0x070A */
+struct ice_aqc_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define ICE_AQC_NVM_PKG_DELETE BIT(0) /* used for command call */
+#define ICE_AQC_NVM_PKG_SKIPPED BIT(0) /* used for command response */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ice_aqc_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+ u8 component_response_code; /* Response only */
+#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define ICE_AQ_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+#define ICE_AQ_NVM_PASS_COMP_TBL_START 0x1
+#define ICE_AQ_NVM_PASS_COMP_TBL_MIDDLE 0x2
+#define ICE_AQ_NVM_PASS_COMP_TBL_END 0x4
+#define ICE_AQ_NVM_PASS_COMP_TBL_START_AND_END 0x5
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
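
ice_aqc_nvm_comp_tbl ends in a length-prefixed, non-NUL-terminated version string (cvs_len followed by cvs[]). A hedged userspace sketch of printing such a string safely; the local struct mirrors the layout above but is not the kernel type:

/* Userspace illustration of reading the length-prefixed component
 * version string; field widths follow the layout above. */
#include <stdint.h>
#include <stdio.h>

struct comp_tbl {
	uint16_t comp_class;
	uint16_t comp_id;
	uint8_t  comp_class_idx;
	uint32_t comp_cmp_stamp;
	uint8_t  cvs_type;
	uint8_t  cvs_len;
	uint8_t  cvs[];		/* not NUL terminated; cvs_len bytes */
} __attribute__((packed));

int main(void)
{
	static const uint8_t buf[] = {
		0x0A, 0x00,		/* comp_class: NVM_COMP_CLASS_ALL_FW */
		0x06, 0x00,		/* comp_id: NVM_COMP_ID_NVM */
		0x00,			/* comp_class_idx */
		0x01, 0x00, 0x00, 0x00,	/* comp_cmp_stamp */
		0x01,			/* cvs_type: ASCII */
		0x05,			/* cvs_len */
		'1', '.', '2', '.', '3'	/* cvs, no terminator */
	};
	const struct comp_tbl *tbl = (const struct comp_tbl *)buf;

	/* bound the print by cvs_len since there is no terminator */
	printf("%.*s\n", tbl->cvs_len, tbl->cvs);
	return 0;
}
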
+
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1476,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys {
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
__le16 vsi_id;
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
@@ -1537,7 +1642,7 @@ struct ice_aqc_add_tx_qgrp {
__le32 parent_teid;
u8 num_txqs;
u8 rsvd[3];
- struct ice_aqc_add_txqs_perq txqs[1];
+ struct ice_aqc_add_txqs_perq txqs[];
};
/* Disable Tx LAN Queues (indirect 0x0C31) */
@@ -1575,18 +1680,13 @@ struct ice_aqc_dis_txq_item {
u8 num_qs;
u8 rsvd;
/* The length of the q_id array varies according to num_qs */
- __le16 q_id[1];
- /* This only applies from F8 onward */
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
(0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
(1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
-};
-
-struct ice_aqc_dis_txq {
- struct ice_aqc_dis_txq_item qgrps[1];
-};
+ __le16 q_id[];
+} __packed;
/* Configure Firmware Logging Command (indirect 0xFF09)
* Logging Information Read Response (indirect 0xFF10)
@@ -1636,12 +1736,7 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
-/* This is the buffer for both of the logging commands.
- * The entry array size depends on the datalen parameter in the descriptor.
- * There will be a total of datalen / 2 entries.
- */
-struct ice_aqc_fw_logging_data {
- __le16 entry[1];
+/* Defines for both above FW logging command/response buffers */
#define ICE_AQC_FW_LOG_ID_S 0
#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
@@ -1654,7 +1749,6 @@ struct ice_aqc_fw_logging_data {
#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
-};
/* Get/Clear FW Log (indirect 0xFF11) */
struct ice_aqc_get_clear_fw_log {
@@ -1716,7 +1810,7 @@ struct ice_aqc_get_pkg_info {
/* Get Package Info List response buffer format (0x0C43) */
struct ice_aqc_get_pkg_info_resp {
__le32 count;
- struct ice_aqc_get_pkg_info pkg_info[1];
+ struct ice_aqc_get_pkg_info pkg_info[];
};
/* Lan Queue Overflow Event (direct, 0x1001) */
@@ -1775,6 +1869,8 @@ struct ice_aq_desc {
struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_checksum nvm_checksum;
+ struct ice_aqc_nvm_pkg_data pkg_data;
+ struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
@@ -1797,6 +1893,7 @@ struct ice_aq_desc {
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
+ struct ice_aqc_get_link_topo get_link_topo;
} params;
};
@@ -1896,12 +1993,19 @@ enum ice_adminq_opc {
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
+ ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_set_port_id_led = 0x06E9,
ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ ice_aqc_opc_nvm_erase = 0x0702,
+ ice_aqc_opc_nvm_write = 0x0703,
ice_aqc_opc_nvm_checksum = 0x0706,
+ ice_aqc_opc_nvm_write_activate = 0x0707,
+ ice_aqc_opc_nvm_update_empr = 0x0709,
+ ice_aqc_opc_nvm_pkg_data = 0x070A,
+ ice_aqc_opc_nvm_pass_component_tbl = 0x070B,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
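
The Write Activate flags added above (ICE_AQC_NVM_ACTIV_SEL_*) occupy bits 3-5, which is exactly what ICE_AQC_NVM_ACTIV_SEL_MASK, i.e. ICE_M(0x7, 3), covers; the NVM, OROM, and netlist selections can be ORed together. A tiny stand-alone check of that arithmetic (local names, not the kernel defines):

/* Sketch: the three bank-select bits all fall inside the 0x7 << 3 mask. */
#include <stdint.h>
#include <stdio.h>

#define ACTIV_SEL_NVM     (1u << 3)
#define ACTIV_SEL_OROM    (1u << 4)
#define ACTIV_SEL_NETLIST (1u << 5)
#define ACTIV_SEL_MASK    (0x7u << 3)

int main(void)
{
	uint8_t cmd_flags = ACTIV_SEL_NVM | ACTIV_SEL_OROM;

	printf("flags=0x%02x in-mask=%d\n", cmd_flags,
	       (cmd_flags & ~ACTIV_SEL_MASK) == 0);
	return 0;
}
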
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index d620d26d42ed..87008476d8fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -635,10 +635,10 @@ int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
+ u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
- u8 buf_len = sizeof(*qg_buf);
struct ice_hw *hw = &pf->hw;
enum ice_status status;
u16 pf_q;
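
The buf_len change above follows from the flexible-array conversion of ice_aqc_add_tx_qgrp: with txqs[] contributing nothing to sizeof, plain sizeof(*qg_buf) would undercount by one entry, so the length must be struct_size(qg_buf, txqs, 1). A runnable userspace model of the arithmetic (the kernel's struct_size() additionally checks for multiplication overflow, and the perq layout here is a stand-in, not the real ice_aqc_add_txqs_perq):

/* Userspace model of the sizing change. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct perq { uint16_t txq_id; uint8_t info[30]; };	/* stand-in */

struct add_tx_qgrp {
	uint32_t parent_teid;
	uint8_t  num_txqs;
	uint8_t  rsvd[3];
	struct perq txqs[];	/* flexible array: contributes 0 to sizeof */
};

/* overflow-unchecked model of the kernel's struct_size() */
#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof(*(p)->member))

int main(void)
{
	struct add_tx_qgrp *qg = NULL;	/* only used inside sizeof */

	printf("sizeof only: %zu\n", sizeof(*qg));		/* 8 */
	printf("with 1 txq:  %zu\n", STRUCT_SIZE(qg, txqs, 1));	/* 8 + 32 */
	return 0;
}
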
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index bce0e1281168..34abfcea9858 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -20,7 +20,40 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
return ICE_ERR_DEVICE_NOT_SUPPORTED;
- hw->mac_type = ICE_MAC_GENERIC;
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_SFP:
+ hw->mac_type = ICE_MAC_E810;
+ break;
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ hw->mac_type = ICE_MAC_GENERIC;
+ break;
+ default:
+ hw->mac_type = ICE_MAC_UNKNOWN;
+ break;
+ }
+
+ ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
return 0;
}
@@ -52,7 +85,8 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* is returned in user specified buffer. Please interpret user specified
* buffer as "manage_mac_read" response.
* Response such as various MAC addresses are stored in HW struct (port.mac)
- * ice_aq_discover_caps is expected to be called before this function is called.
+ * ice_discover_dev_caps is expected to be called before this function is
+ * called.
*/
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
@@ -116,11 +150,13 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
enum ice_status status;
+ struct ice_hw *hw;
cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return ICE_ERR_PARAM;
+ hw = pi->hw;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@@ -128,17 +164,94 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
cmd->param0 |= cpu_to_le16(report_mode);
- status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
+ status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
+
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
+ report_mode);
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ pcaps->low_power_ctrl_an);
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ pcaps->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ pcaps->link_fec_options);
+ ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
+ pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
+ pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ pcaps->module_type[0]);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ pcaps->module_type[1]);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
+ sizeof(pi->phy.link_info.module_type));
}
return status;
}
/**
+ * ice_aq_get_link_topo_handle - get link topology node return status
+ * @pi: port information structure
+ * @node_type: requested node type
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get link topology node return status for specified node type (0x06E0)
+ *
+ * Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present, then
+ * connection type is backplane or BASE-T.
+ */
+static enum ice_status
+ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_is_media_cage_present
+ * @pi: port information structure
+ *
+ * Returns true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ice_is_media_cage_present(struct ice_port_info *pi)
+{
+ /* Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present then
+ * connection type is backplane or BASE-T.
+ */
+ return !ice_aq_get_link_topo_handle(pi,
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
+ NULL);
+}
+
+/**
* ice_get_media_type - Gets media type
* @pi: port information structure
*/
@@ -155,6 +268,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ICE_MEDIA_DA;
+
switch (hw_link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_1000BASE_SX:
case ICE_PHY_TYPE_LOW_1000BASE_LX:
@@ -163,7 +288,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_SR:
case ICE_PHY_TYPE_LOW_25GBASE_LR:
- case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
case ICE_PHY_TYPE_LOW_40GBASE_LR4:
case ICE_PHY_TYPE_LOW_50GBASE_SR2:
@@ -175,6 +299,14 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_100GBASE_LR4:
case ICE_PHY_TYPE_LOW_100GBASE_SR2:
case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
@@ -194,6 +326,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
case ICE_PHY_TYPE_LOW_100GBASE_CP2:
return ICE_MEDIA_DA;
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ fallthrough;
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_X:
@@ -211,8 +353,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
}
} else {
switch (hw_link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ fallthrough;
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ return ICE_MEDIA_FIBER;
}
}
return ICE_MEDIA_UNKNOWN;
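
The AUI/CAUI phy types above are ambiguous on their own: with a media cage present they are direct-attach, otherwise backplane, hence the cage check followed by a fallthrough into the backplane cases. A compact stand-alone model of that control flow, with cage_present() stubbed in place of ice_is_media_cage_present():

/* Stand-alone model of the cage-check-then-fallthrough classification. */
#include <stdbool.h>
#include <stdio.h>

enum media { MEDIA_DA, MEDIA_BACKPLANE, MEDIA_UNKNOWN };
enum phy { PHY_25G_AUI_C2C, PHY_1000BASE_KX };

static bool cage_present(void) { return true; }	/* stub */

static enum media classify(enum phy p)
{
	switch (p) {
	case PHY_25G_AUI_C2C:
		if (cage_present())
			return MEDIA_DA;
		/* fall through: no cage means backplane */
	case PHY_1000BASE_KX:
		return MEDIA_BACKPLANE;
	}
	return MEDIA_UNKNOWN;
}

int main(void)
{
	printf("%d\n", classify(PHY_25G_AUI_C2C));	/* 0 = MEDIA_DA */
	return 0;
}
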
@@ -292,18 +442,21 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
- ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, "get link info\n");
+ ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
(unsigned long long)li->phy_type_low);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)li->phy_type_high);
- ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
- ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
- ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
- ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
- ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
- ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
- ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
+ ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
+ ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
+ li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
/* save link status information */
if (link)
@@ -440,32 +593,24 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), sw);
}
-#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
- (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
-#define ICE_FW_LOG_DESC_SIZE_MAX \
- ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
-
/**
* ice_get_fw_log_cfg - get FW logging configuration
* @hw: pointer to the HW struct
*/
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
- struct ice_aqc_fw_logging_data *config;
struct ice_aq_desc desc;
enum ice_status status;
+ __le16 *config;
u16 size;
- size = ICE_FW_LOG_DESC_SIZE_MAX;
+ size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
if (!config)
return ICE_ERR_NO_MEMORY;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
if (!status) {
u16 i;
@@ -474,7 +619,7 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
u16 v, m, flgs;
- v = le16_to_cpu(config->entry[i]);
+ v = le16_to_cpu(config[i]);
m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
@@ -526,11 +671,11 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
*/
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
- struct ice_aqc_fw_logging_data *data = NULL;
struct ice_aqc_fw_logging *cmd;
enum ice_status status = 0;
u16 i, chgs = 0, len = 0;
struct ice_aq_desc desc;
+ __le16 *data = NULL;
u8 actv_evnts = 0;
void *buf = NULL;
@@ -571,8 +716,9 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
continue;
if (!data) {
- data = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_FW_LOG_DESC_SIZE_MAX,
+ data = devm_kcalloc(ice_hw_to_dev(hw),
+ sizeof(*data),
+ ICE_AQC_FW_LOG_ID_MAX,
GFP_KERNEL);
if (!data)
return ICE_ERR_NO_MEMORY;
@@ -580,7 +726,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
val = i << ICE_AQC_FW_LOG_ID_S;
val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
- data->entry[chgs++] = cpu_to_le16(val);
+ data[chgs++] = cpu_to_le16(val);
}
/* Only enable FW logging if at least one module is specified.
@@ -599,7 +745,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
buf = data;
- len = ICE_FW_LOG_DESC_SIZE(chgs);
+ len = sizeof(*data) * chgs;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
}
}
@@ -629,7 +775,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
continue;
}
- v = le16_to_cpu(data->entry[i]);
+ v = le16_to_cpu(data[i]);
m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
}
@@ -881,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay, uld_mask;
+ u32 cnt, reg = 0, grst_timeout, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
- for (cnt = 0; cnt < grst_delay; cnt++) {
+ for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
reg = rd32(hw, GLGEN_RSTAT);
if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
break;
}
- if (cnt == grst_delay) {
+ if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
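
The renamed grst_timeout is a poll count: the GRSTDEL field (in 100 ms units) plus 10 extra iterations, i.e. one second, for outstanding AQ commands, with each loop pass sleeping 100 ms. A stand-alone sketch of the extraction arithmetic (mask and shift are placeholders, not the hardware values):

/* Sketch of the timeout computation; GRSTDEL_M/S are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define GRSTDEL_S 0
#define GRSTDEL_M (0x3F << GRSTDEL_S)	/* placeholder field width */

int main(void)
{
	uint32_t rstctl = 30;	/* pretend GLGEN_RSTCTL read */
	uint32_t grst_timeout = ((rstctl & GRSTDEL_M) >> GRSTDEL_S) + 10;

	printf("poll up to %u times x 100 ms = %u ms\n",
	       grst_timeout, grst_timeout * 100);
	return 0;
}
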
@@ -1541,7 +1687,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
enum ice_status status;
u16 buf_len;
- buf_len = struct_size(buf, elem, num - 1);
+ buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1558,7 +1704,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
if (status)
goto ice_alloc_res_exit;
- memcpy(res, buf->elem, sizeof(buf->elem) * num);
+ memcpy(res, buf->elem, sizeof(*buf->elem) * num);
ice_alloc_res_exit:
kfree(buf);
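
Both changes in this hunk track the elem[1] to elem[] conversion: struct_size(buf, elem, num) drops the "- 1" that compensated for the element formerly embedded in the struct, and sizeof(buf->elem), which would not even compile against a flexible array, becomes sizeof(*buf->elem). A runnable sketch showing the two sizing styles agree:

/* Sketch: with a one-element array one elem lives inside sizeof(struct);
 * with a flexible array none do, so the multiplier changes from num - 1
 * to num while the total stays the same. */
#include <stdint.h>
#include <stdio.h>

struct old_buf { uint16_t num_elems; uint16_t res_type; uint32_t elem[1]; };
struct new_buf { uint16_t num_elems; uint16_t res_type; uint32_t elem[]; };

int main(void)
{
	size_t num = 4;
	size_t old_len = sizeof(struct old_buf) +
			 (num - 1) * sizeof(uint32_t);
	size_t new_len = sizeof(struct new_buf) +
			 num * sizeof(uint32_t);

	printf("old=%zu new=%zu equal=%d\n", old_len, new_len,
	       old_len == new_len);
	return 0;
}
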
@@ -1572,14 +1718,13 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status
-ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status;
u16 buf_len;
- buf_len = struct_size(buf, elem, num - 1);
+ buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1587,7 +1732,7 @@ ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
/* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num);
buf->res_type = cpu_to_le16(type);
- memcpy(buf->elem, res, sizeof(buf->elem) * num);
+ memcpy(buf->elem, res, sizeof(*buf->elem) * num);
status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
ice_aqc_opc_free_res, NULL);
@@ -1622,221 +1767,431 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
}
/**
- * ice_parse_caps - parse function/device capabilities
+ * ice_parse_common_caps - parse common device/function capabilities
* @hw: pointer to the HW struct
- * @buf: pointer to a buffer containing function/device capability records
- * @cap_count: number of capability records in the list
- * @opc: type of capabilities list to parse
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Returns: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+ struct ice_aqc_list_caps_elem *elem, const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+ bool found = true;
+
+ switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: valid_functions (bitmap) = %d\n", prefix,
+ caps->valid_functions);
+ break;
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: sr_iov_1_1 = %d\n", prefix,
+ caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: dcb = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: active_tc_bitmap = %d\n", prefix,
+ caps->active_tc_bitmap);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d\n", prefix, caps->maxtc);
+ break;
+ case ICE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_size = %d\n", prefix,
+ caps->rss_table_size);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_entry_width = %d\n", prefix,
+ caps->rss_table_entry_width);
+ break;
+ case ICE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_rxq = %d\n", prefix,
+ caps->num_rxq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rxq_first_id = %d\n", prefix,
+ caps->rxq_first_id);
+ break;
+ case ICE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_txq = %d\n", prefix,
+ caps->num_txq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: txq_first_id = %d\n", prefix,
+ caps->txq_first_id);
+ break;
+ case ICE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_msix_vectors = %d\n", prefix,
+ caps->num_msix_vectors);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: msix_vector_first_id = %d\n", prefix,
+ caps->msix_vector_first_id);
+ break;
+ case ICE_AQC_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
+ break;
+ case ICE_AQC_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
+ break;
+ case ICE_AQC_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
+ break;
+ case ICE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
+ caps->nvm_unified_update);
+ break;
+ case ICE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+ prefix, caps->max_mtu);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ice_recalc_port_limited_caps - Recalculate port limited capabilities
+ * @hw: pointer to the HW structure
+ * @caps: pointer to capabilities structure to fix
+ *
+ * Re-calculate the capabilities that are dependent on the number of physical
+ * ports; i.e. some features are not supported or function differently on
+ * devices with more than 4 ports.
+ */
+static void
+ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
+{
+ /* This assumes device capabilities are always scanned before function
+ * capabilities during the initialization flow.
+ */
+ if (hw->dev_caps.num_funcs > 4) {
+ /* Max 4 TCs per port */
+ caps->maxtc = 4;
+ ice_debug(hw, ICE_DBG_INIT,
+ "reducing maxtc to %d (based on #ports)\n",
+ caps->maxtc);
+ }
+}
+
+/**
+ * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VF.
+ */
+static void
+ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 logical_id = le32_to_cpu(cap->logical_id);
+ u32 number = le32_to_cpu(cap->number);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
+ func_p->vf_base_id);
+}
+
+/**
+ * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VSI.
+ */
+static void
+ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
+ le32_to_cpu(cap->number));
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
+ func_p->guar_num_vsi);
+}
+
+/**
+ * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_FD.
+ */
+static void
+ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
+{
+ u32 reg_val, val;
+
+ reg_val = rd32(hw, GLQF_FD_SIZE);
+ val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
+ GLQF_FD_SIZE_FD_GSIZE_S;
+ func_p->fd_fltr_guar =
+ ice_get_num_per_func(hw, val);
+ val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
+ GLQF_FD_SIZE_FD_BSIZE_S;
+ func_p->fd_fltr_best_effort = val;
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "func caps: fd_fltr_guar = %d\n",
+ func_p->fd_fltr_guar);
+ ice_debug(hw, ICE_DBG_INIT,
+ "func caps: fd_fltr_best_effort = %d\n",
+ func_p->fd_fltr_best_effort);
+}
+
+/**
+ * ice_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
*
- * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
*/
static void
-ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
- enum ice_adminq_opc opc)
+ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
{
struct ice_aqc_list_caps_elem *cap_resp;
- struct ice_hw_func_caps *func_p = NULL;
- struct ice_hw_dev_caps *dev_p = NULL;
- struct ice_hw_common_caps *caps;
- char const *prefix;
u32 i;
- if (!buf)
- return;
-
cap_resp = (struct ice_aqc_list_caps_elem *)buf;
- if (opc == ice_aqc_opc_list_dev_caps) {
- dev_p = &hw->dev_caps;
- caps = &dev_p->common_cap;
- prefix = "dev cap";
- } else if (opc == ice_aqc_opc_list_func_caps) {
- func_p = &hw->func_caps;
- caps = &func_p->common_cap;
- prefix = "func cap";
- } else {
- ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
- return;
- }
+ memset(func_p, 0, sizeof(*func_p));
- for (i = 0; caps && i < cap_count; i++, cap_resp++) {
- u32 logical_id = le32_to_cpu(cap_resp->logical_id);
- u32 phys_id = le32_to_cpu(cap_resp->phys_id);
- u32 number = le32_to_cpu(cap_resp->number);
- u16 cap = le16_to_cpu(cap_resp->cap);
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+ bool found;
- switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
- caps->valid_functions = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: valid_functions (bitmap) = %d\n", prefix,
- caps->valid_functions);
+ found = ice_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
- /* store func count for resource management purposes */
- if (dev_p)
- dev_p->num_funcs = hweight32(number);
- break;
- case ICE_AQC_CAPS_SRIOV:
- caps->sr_iov_1_1 = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: sr_iov_1_1 = %d\n", prefix,
- caps->sr_iov_1_1);
- break;
+ switch (cap) {
case ICE_AQC_CAPS_VF:
- if (dev_p) {
- dev_p->num_vfs_exposed = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_vfs_exposed = %d\n", prefix,
- dev_p->num_vfs_exposed);
- } else if (func_p) {
- func_p->num_allocd_vfs = number;
- func_p->vf_base_id = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_allocd_vfs = %d\n", prefix,
- func_p->num_allocd_vfs);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: vf_base_id = %d\n", prefix,
- func_p->vf_base_id);
- }
+ ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_VSI:
- if (dev_p) {
- dev_p->num_vsi_allocd_to_host = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_vsi_allocd_to_host = %d\n",
- prefix,
- dev_p->num_vsi_allocd_to_host);
- } else if (func_p) {
- func_p->guar_num_vsi =
- ice_get_num_per_func(hw, ICE_MAX_VSI);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: guar_num_vsi (fw) = %d\n",
- prefix, number);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: guar_num_vsi = %d\n",
- prefix, func_p->guar_num_vsi);
- }
+ ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_DCB:
- caps->dcb = (number == 1);
- caps->active_tc_bitmap = logical_id;
- caps->maxtc = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: dcb = %d\n", prefix, caps->dcb);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: active_tc_bitmap = %d\n", prefix,
- caps->active_tc_bitmap);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: maxtc = %d\n", prefix, caps->maxtc);
- break;
- case ICE_AQC_CAPS_RSS:
- caps->rss_table_size = number;
- caps->rss_table_entry_width = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_size = %d\n", prefix,
- caps->rss_table_size);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_entry_width = %d\n", prefix,
- caps->rss_table_entry_width);
+ case ICE_AQC_CAPS_FD:
+ ice_parse_fdir_func_caps(hw, func_p);
break;
- case ICE_AQC_CAPS_RXQS:
- caps->num_rxq = number;
- caps->rxq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_rxq = %d\n", prefix,
- caps->num_rxq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rxq_first_id = %d\n", prefix,
- caps->rxq_first_id);
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ ice_debug(hw, ICE_DBG_INIT,
+ "func caps: unknown capability[%d]: 0x%x\n",
+ i, cap);
break;
- case ICE_AQC_CAPS_TXQS:
- caps->num_txq = number;
- caps->txq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_txq = %d\n", prefix,
- caps->num_txq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: txq_first_id = %d\n", prefix,
- caps->txq_first_id);
+ }
+ }
+
+ ice_recalc_port_limited_caps(hw, &func_p->common_cap);
+}
+
+/**
+ * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_funcs = hweight32(number);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
+ dev_p->num_funcs);
+}
+
+/**
+ * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VF for device capabilities.
+ */
+static void
+ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
+ dev_p->num_vfs_exposed);
+}
+
+/**
+ * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VSI for device capabilities.
+ */
+static void
+ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_vsi_allocd_to_host = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
+ dev_p->num_vsi_allocd_to_host);
+}
+
+/**
+ * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_FD for device capabilities.
+ */
+static void
+ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_flow_director_fltr = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
+ dev_p->num_flow_director_fltr);
+}
+
+/**
+ * ice_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void
+ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ice_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+ bool found;
+
+ found = ice_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_MSIX:
- caps->num_msix_vectors = number;
- caps->msix_vector_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_msix_vectors = %d\n", prefix,
- caps->num_msix_vectors);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: msix_vector_first_id = %d\n", prefix,
- caps->msix_vector_first_id);
+ case ICE_AQC_CAPS_VF:
+ ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
- if (dev_p) {
- dev_p->num_flow_director_fltr = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_flow_director_fltr = %d\n",
- prefix,
- dev_p->num_flow_director_fltr);
- }
- if (func_p) {
- u32 reg_val, val;
-
- reg_val = rd32(hw, GLQF_FD_SIZE);
- val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
- GLQF_FD_SIZE_FD_GSIZE_S;
- func_p->fd_fltr_guar =
- ice_get_num_per_func(hw, val);
- val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
- GLQF_FD_SIZE_FD_BSIZE_S;
- func_p->fd_fltr_best_effort = val;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: fd_fltr_guar = %d\n",
- prefix, func_p->fd_fltr_guar);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: fd_fltr_best_effort = %d\n",
- prefix, func_p->fd_fltr_best_effort);
- }
+ case ICE_AQC_CAPS_VSI:
+ ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_MAX_MTU:
- caps->max_mtu = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
- prefix, caps->max_mtu);
+ case ICE_AQC_CAPS_FD:
+ ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
default:
- ice_debug(hw, ICE_DBG_INIT,
- "%s: unknown capability[%d]: 0x%x\n", prefix,
- i, cap);
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ ice_debug(hw, ICE_DBG_INIT,
+ "dev caps: unknown capability[%d]: 0x%x\n",
+ i, cap);
break;
}
}
- /* Re-calculate capabilities that are dependent on the number of
- * physical ports; i.e. some features are not supported or function
- * differently on devices with more than 4 ports.
- */
- if (hw->dev_caps.num_funcs > 4) {
- /* Max 4 TCs per port */
- caps->maxtc = 4;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: maxtc = %d (based on #ports)\n", prefix,
- caps->maxtc);
- }
+ ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}
/**
- * ice_aq_discover_caps - query function/device capabilities
+ * ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
- * @buf: a virtual buffer to hold the capabilities
- * @buf_size: Size of the virtual buffer
- * @cap_count: cap count needed if AQ err==ENOMEM
- * @opc: capabilities type to discover - pass in the command opcode
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
* @cd: pointer to command details structure or NULL
*
- * Get the function(0x000a)/device(0x000b) capabilities description from
- * the firmware.
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
+ * firmware could return) to avoid this.
*/
-static enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+enum ice_status
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
@@ -1849,59 +2204,78 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
-
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status)
- ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
- else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+
+ if (cap_count)
*cap_count = le32_to_cpu(cmd->count);
+
return status;
}
/**
- * ice_discover_caps - get info about the HW
+ * ice_discover_dev_caps - Read and extract device capabilities
* @hw: pointer to the hardware structure
- * @opc: capabilities type to discover - pass in the command opcode
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
*/
-static enum ice_status
-ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
+enum ice_status
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
enum ice_status status;
- u32 cap_count;
- u16 cbuf_len;
- u8 retries;
-
- /* The driver doesn't know how many capabilities the device will return
- * so the buffer size required isn't known ahead of time. The driver
- * starts with cbuf_len and if this turns out to be insufficient, the
- * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
- * The driver then allocates the buffer based on the count and retries
- * the operation. So it follows that the retry count is 2.
+ u32 cap_count = 0;
+ void *cbuf;
+
+ cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
*/
-#define ICE_GET_CAP_BUF_COUNT 40
-#define ICE_GET_CAP_RETRY_COUNT 2
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
- cap_count = ICE_GET_CAP_BUF_COUNT;
- retries = ICE_GET_CAP_RETRY_COUNT;
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+ ice_aqc_opc_list_dev_caps, NULL);
+ if (!status)
+ ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+ kfree(cbuf);
- do {
- void *cbuf;
+ return status;
+}
- cbuf_len = (u16)(cap_count *
- sizeof(struct ice_aqc_list_caps_elem));
- cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
- if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+/**
+ * ice_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ */
+static enum ice_status
+ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
+{
+ enum ice_status status;
+ u32 cap_count = 0;
+ void *cbuf;
- status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
- opc, NULL);
- devm_kfree(ice_hw_to_dev(hw), cbuf);
+ cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
- if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
- break;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
- /* If ENOMEM is returned, try again with bigger buffer */
- } while (--retries);
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+ ice_aqc_opc_list_func_caps, NULL);
+ if (!status)
+ ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
+ kfree(cbuf);
return status;
}
@@ -1978,11 +2352,11 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
{
enum ice_status status;
- status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
- if (!status)
- status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+ status = ice_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
- return status;
+ return ice_discover_func_caps(hw, &hw->func_caps);
}
/**
@@ -2218,7 +2592,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
/**
* ice_aq_set_phy_cfg
* @hw: pointer to the HW struct
- * @lport: logical port number
+ * @pi: port information structure for the logical port of interest
* @cfg: structure with PHY configuration data to be set
* @cd: pointer to command details structure or NULL
*
@@ -2228,7 +2602,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* parameters. This status will be indicated by the command response (0x0601).
*/
enum ice_status
-ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
@@ -2247,24 +2621,29 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- desc.params.set_phy.lport_num = lport;
+ desc.params.set_phy.lport_num = pi->lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
(unsigned long long)le64_to_cpu(cfg->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)le64_to_cpu(cfg->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
- ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
- cfg->low_power_ctrl);
- ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ cfg->low_power_ctrl_an);
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
+ cfg->link_fec_opt);
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
status = 0;
+ if (!status)
+ pi->phy.curr_user_phy_cfg = *cfg;
+
return status;
}
@@ -2298,9 +2677,6 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
- if (!status)
- memcpy(li->module_type, &pcaps->module_type,
- sizeof(li->module_type));
devm_kfree(ice_hw_to_dev(hw), pcaps);
}
@@ -2309,28 +2685,101 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
}
/**
- * ice_set_fc
+ * ice_cache_phy_user_req
* @pi: port information structure
- * @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @ena_auto_link_update: enable automatic link update
+ * @cache_data: PHY state data to cache
+ * @cache_mode: which user setting (FC, FEC, or speed) to cache
*
- * Set the requested flow control mode.
+ * Cache the user-requested (FC, FEC, speed) setting for later use.
+ */
+static void
+ice_cache_phy_user_req(struct ice_port_info *pi,
+ struct ice_phy_cache_mode_data cache_data,
+ enum ice_phy_cache_mode cache_mode)
+{
+ if (!pi)
+ return;
+
+ switch (cache_mode) {
+ case ICE_FC_MODE:
+ pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
+ break;
+ case ICE_SPEED_MODE:
+ pi->phy.curr_user_speed_req =
+ cache_data.data.curr_user_speed_req;
+ break;
+ case ICE_FEC_MODE:
+ pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_caps_to_fc_mode
+ * @caps: PHY capabilities
+ *
+ * Convert PHY FC capabilities to ice FC mode
+ */
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
+{
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
+ caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_FULL;
+
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
+ return ICE_FC_TX_PAUSE;
+
+ if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_RX_PAUSE;
+
+ return ICE_FC_NONE;
+}
+
+/**
+ * ice_caps_to_fec_mode
+ * @caps: PHY capabilities
+ * @fec_options: Link FEC options
+ *
+ * Convert PHY FEC capabilities to ice FEC mode
+ */
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
+{
+ if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
+ return ICE_FEC_AUTO;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+ ICE_AQC_PHY_FEC_25G_KR_REQ))
+ return ICE_FEC_BASER;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
+ return ICE_FEC_RS;
+
+ return ICE_FEC_NONE;
+}
+
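[Editor's sketch, not part of the patch] How a caller might decode the current
FC and FEC modes from a get-capabilities response using the two helpers above;
pcaps is assumed to be a filled ice_aqc_get_phy_caps_data:

static void
ice_example_decode_phy_caps(struct ice_aqc_get_phy_caps_data *pcaps,
			    enum ice_fc_mode *fc, enum ice_fec_mode *fec)
{
	/* pause bits live in caps; FEC ability/request bits in link_fec_options */
	*fc = ice_caps_to_fc_mode(pcaps->caps);
	*fec = ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options);
}
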
+/**
+ * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
+ * @pi: port information structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
*/
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fc_mode req_mode)
{
- struct ice_aqc_set_phy_cfg_data cfg = { 0 };
- struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
+ struct ice_phy_cache_mode_data cache_data;
u8 pause_mask = 0x0;
- struct ice_hw *hw;
- if (!pi)
- return ICE_ERR_PARAM;
- hw = pi->hw;
- *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
+ if (!pi || !cfg)
+ return ICE_ERR_BAD_PTR;
- switch (pi->fc.req_mode) {
+ switch (req_mode) {
case ICE_FC_FULL:
pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
@@ -2345,6 +2794,42 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
break;
}
+ /* clear the old pause settings */
+ cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
+ ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+ /* Cache user FC request */
+ cache_data.data.curr_user_fc_req = req_mode;
+ ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
+
+ return 0;
+}
+
+/**
+ * ice_set_fc
+ * @pi: port information structure
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
+ * @ena_auto_link_update: enable automatic link update
+ *
+ * Set the requested flow control mode.
+ */
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi || !aq_failures)
+ return ICE_ERR_BAD_PTR;
+
+ *aq_failures = 0;
+ hw = pi->hw;
+
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return ICE_ERR_NO_MEMORY;
@@ -2357,12 +2842,12 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
goto out;
}
- /* clear the old pause settings */
- cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
- ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
- /* set the new capabilities */
- cfg.caps |= pause_mask;
+ /* Configure the set PHY data */
+ status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
+ if (status)
+ goto out;
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
@@ -2371,15 +2856,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
/* Auto restart link so settings take effect */
if (ena_auto_link_update)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- /* Copy over all the old settings */
- cfg.phy_type_high = pcaps->phy_type_high;
- cfg.phy_type_low = pcaps->phy_type_low;
- cfg.low_power_ctrl = pcaps->low_power_ctrl;
- cfg.eee_cap = pcaps->eee_cap;
- cfg.eeer_value = pcaps->eeer_value;
- cfg.link_fec_opt = pcaps->link_fec_options;
-
- status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
if (status) {
*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
goto out;
@@ -2409,7 +2887,44 @@ out:
}
/**
+ * ice_phy_caps_equals_cfg
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if the PHY capabilities match the PHY
+ * configuration
+ */
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
+ struct ice_aqc_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
+ ICE_AQC_GET_PHY_EN_MOD_QUAL);
+ cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
* ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @pi: port information structure
* @caps: PHY ability structure to copy data from
* @cfg: PHY configuration structure to copy data to
*
@@ -2417,42 +2932,73 @@ out:
* data structure
*/
void
-ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg)
{
- if (!caps || !cfg)
+ if (!pi || !caps || !cfg)
return;
+ memset(cfg, 0, sizeof(*cfg));
cfg->phy_type_low = caps->phy_type_low;
cfg->phy_type_high = caps->phy_type_high;
cfg->caps = caps->caps;
- cfg->low_power_ctrl = caps->low_power_ctrl;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
cfg->eee_cap = caps->eee_cap;
cfg->eeer_value = caps->eeer_value;
cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+
+ if (ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ return;
+
+ if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
+ cfg->module_compliance_enforcement |=
+ ICE_LINK_OVERRIDE_STRICT_MODE;
+ }
}
/**
* ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
+ * @pi: port information structure
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
- *
- * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
- * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
- * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
*/
-void
-ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
+enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec)
{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+
+ if (!pi || !cfg)
+ return ICE_ERR_BAD_PTR;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status)
+ goto out;
+
+ cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+
switch (fec) {
case ICE_FEC_BASER:
/* Clear RS bits, and AND BASE-R ability
* bits and OR request bits.
*/
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
- ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
- ICE_AQC_PHY_FEC_25G_KR_REQ;
+ ICE_AQC_PHY_FEC_25G_KR_REQ;
break;
case ICE_FEC_RS:
/* Clear BASE-R bits, and AND RS ability
@@ -2460,7 +3006,7 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
*/
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
- ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ;
break;
case ICE_FEC_NONE:
/* Clear all FEC option bits. */
@@ -2469,8 +3015,28 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
case ICE_FEC_AUTO:
/* AND auto FEC bit, and all caps bits. */
cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
+ cfg->link_fec_opt |= pcaps->link_fec_options;
break;
+ default:
+ status = ICE_ERR_PARAM;
+ break;
+ }
+
+ if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ goto out;
+
+ if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
+ (tlv.options & ICE_LINK_OVERRIDE_EN))
+ cfg->link_fec_opt = tlv.fec_options;
}
+
+out:
+ kfree(pcaps);
+
+ return status;
}
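
[Editor's sketch] The new calling convention: unlike the old void helper,
ice_cfg_phy_fec() now fetches the topology capabilities itself, so a caller
only builds the base config and checks the return status. The ice_example_*
name below is illustrative; pcaps is assumed to be a filled capabilities
structure:

static enum ice_status
ice_example_set_fec(struct ice_port_info *pi,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    enum ice_fec_mode fec)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	enum ice_status status;

	/* start from the current abilities, then apply the FEC request */
	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	status = ice_cfg_phy_fec(pi, &cfg, fec);
	if (status)
		return status;

	return ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
}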
/**
@@ -2889,10 +3455,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
{
- u16 i, sum_header_size, sum_q_size = 0;
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
struct ice_aq_desc desc;
+ u16 i, sum_size = 0;
cmd = &desc.params.add_txqs;
@@ -2904,18 +3470,13 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- sum_header_size = num_qgrps *
- (sizeof(*qg_list) - sizeof(*qg_list->txqs));
-
- list = qg_list;
- for (i = 0; i < num_qgrps; i++) {
- struct ice_aqc_add_txqs_perq *q = list->txqs;
-
- sum_q_size += list->num_txqs * sizeof(*q);
- list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+ for (i = 0, list = qg_list; i < num_qgrps; i++) {
+ sum_size += struct_size(list, txqs, list->num_txqs);
+ list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
+ list->num_txqs);
}
- if (buf_size != (sum_header_size + sum_q_size))
+ if (buf_size != sum_size)
return ICE_ERR_PARAM;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2943,6 +3504,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
+ struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
enum ice_status status;
@@ -2992,16 +3554,16 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*/
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- for (i = 0; i < num_qgrps; ++i) {
- /* Calculate the size taken up by the queue IDs in this group */
- sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
-
- /* Add the size of the group header */
- sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+ for (i = 0, item = qg_list; i < num_qgrps; i++) {
+ u16 item_size = struct_size(item, q_id, item->num_qs);
/* If the num of queues is even, add 2 bytes of padding */
- if ((qg_list[i].num_qs % 2) == 0)
- sz += 2;
+ if ((item->num_qs % 2) == 0)
+ item_size += 2;
+
+ sz += item_size;
+
+ item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
}
if (buf_size != sz)
@@ -3342,7 +3904,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
* Without setting the generic section as valid in valid_sections, the
* Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
- buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+ buf->txqs[0].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->txqs[0].info.generic = 0;
+ buf->txqs[0].info.cir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ buf->txqs[0].info.eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
@@ -3390,24 +3963,32 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
- struct ice_aqc_dis_txq_item qg_list;
+ struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
- u16 i;
+ struct ice_hw *hw;
+ u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ hw = pi->hw;
+
if (!num_queues) {
/* if the queue is already disabled but the disable queue command
* still has to be sent to complete the VF reset, then call
* ice_aq_dis_lan_txq without any queue information
*/
if (rst_src)
- return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
return ICE_ERR_CFG;
}
+ buf_size = struct_size(qg_list, q_id, 1);
+ qg_list = kzalloc(buf_size, GFP_KERNEL);
+ if (!qg_list)
+ return ICE_ERR_NO_MEMORY;
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -3416,23 +3997,22 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
if (!node)
continue;
- q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
if (!q_ctx) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
q_handles[i]);
continue;
}
if (q_ctx->q_handle != q_handles[i]) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
q_ctx->q_handle, q_handles[i]);
continue;
}
- qg_list.parent_teid = node->info.parent_teid;
- qg_list.num_qs = 1;
- qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
- status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), rst_src, vmvf_num,
- cd);
+ qg_list->parent_teid = node->info.parent_teid;
+ qg_list->num_qs = 1;
+ qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
+ vmvf_num, cd);
if (status)
break;
@@ -3440,6 +4020,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
}
mutex_unlock(&pi->sched_lock);
+ kfree(qg_list);
return status;
}
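
[Editor's note] On the struct_size() conversions in the hunks above: assuming
q_id and txqs are flexible array members (per the matching ice_adminq_cmd.h
change in this series), struct_size() yields the header size plus n trailing
elements, with overflow checking. For instance:

	/* sizeof(*qg_list) plus one __le16 q_id element; saturates to
	 * SIZE_MAX instead of wrapping on overflow
	 */
	buf_size = struct_size(qg_list, q_id, 1);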
@@ -3652,17 +4233,168 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
*/
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf)
+ struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
- buf->generic[0].node_teid = cpu_to_le32(node_teid);
+ buf->node_teid = cpu_to_le32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
+
+/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ /* Currently, only supported for E810 devices */
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
+ hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ return true;
+ }
+
+ return false;
+}
+
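[Editor's sketch] The if/else ladder above is just a three-component version
compare. A hypothetical helper (not part of the patch) expressing the same
test:

static bool
ice_example_fw_api_ver_ge(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver != maj)
		return hw->api_maj_ver > maj;
	if (hw->api_min_ver != min)
		return hw->api_min_ver > min;
	return hw->api_patch >= patch;
}

With it, the check above would read: hw->mac_type == ICE_MAC_E810 &&
ice_example_fw_api_ver_ge(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
ICE_FW_API_LINK_OVERRIDE_MIN, ICE_FW_API_LINK_OVERRIDE_PATCH).
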
+/**
+ * ice_get_link_default_override
+ * @ldo: pointer to the link default override struct
+ * @pi: pointer to the port info struct
+ *
+ * Gets the link default override for a port
+ */
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi)
+{
+ u16 i, tlv, tlv_len, tlv_start, buf, offset;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
+ ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read link override TLV.\n");
+ return status;
+ }
+
+ /* Each port has its own config; calculate for our port */
+ tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
+ ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
+
+ /* link options first */
+ status = ice_read_sr_word(hw, tlv_start, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+ ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
+ ICE_LINK_OVERRIDE_PHY_CFG_S;
+
+ /* link PHY config */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
+ status = ice_read_sr_word(hw, offset, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override phy config.\n");
+ return status;
+ }
+ ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
+
+ /* PHY types low */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_low |= ((u64)buf << (i * 16));
+ }
+
+ /* PHY types high */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
+ ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_high |= ((u64)buf << (i * 16));
+ }
+
+ return status;
+}
+
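[Editor's sketch] The two loops above share one pattern: assemble a 64-bit PHY
type mask from consecutive 16-bit shadow-RAM words. A hypothetical shared
helper (ice_example_read_sr_u64 is not an existing function):

static enum ice_status
ice_example_read_sr_u64(struct ice_hw *hw, u16 offset, u64 *val)
{
	enum ice_status status;
	u16 i, word;

	*val = 0;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, offset + i, &word);
		if (status)
			return status;
		/* word i carries bits [16 * i, 16 * i + 15] of the mask */
		*val |= (u64)word << (i * 16);
	}

	return 0;
}
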
+/**
+ * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
+ * @caps: get PHY capability data
+ */
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
+{
+ if (caps->caps & ICE_AQC_PHY_AN_MODE ||
+ caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
+ ICE_AQC_PHY_AN_EN_CLAUSE73 |
+ ICE_AQC_PHY_AN_EN_CLAUSE37))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = cpu_to_le16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
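
[Editor's sketch] With the function now exported (see the ice_common.h hunk
below), code outside ice_dcb.c can push a locally built MIB. A minimal sketch,
assuming the SET_LOCAL_MIB_TYPE_LOCAL_MIB definition from ice_dcb.h and a
caller-packed MIB block:

static enum ice_status
ice_example_apply_local_mib(struct ice_hw *hw, u8 *mib_buf, u16 mib_len)
{
	/* mib_buf is assumed to hold a packed local LLDP MIB */
	return ice_aq_set_lldp_mib(hw, SET_LOCAL_MIB_TYPE_LOCAL_MIB,
				   mib_buf, mib_len, NULL);
}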
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 9b9e50d2398b..3ebb973878c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -11,8 +11,6 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -87,6 +85,11 @@ enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
@@ -95,17 +98,33 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
-ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+bool ice_fw_supports_link_override(struct ice_hw *hw);
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi);
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
+enum ice_status
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fc_mode fc);
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
void
-ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec);
-void
-ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec);
+enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
@@ -152,5 +171,8 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf);
+ struct ice_aqc_txsched_elem_data *buf);
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 1e18021aa073..1f46a7828be8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -312,9 +312,10 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
- int i; \
/* free descriptors */ \
- if ((qi)->ring.r.ring##_bi) \
+ if ((qi)->ring.r.ring##_bi) { \
+ int i; \
+ \
for (i = 0; i < (qi)->num_##ring##_entries; i++) \
if ((qi)->ring.r.ring##_bi[i].pa) { \
dmam_free_coherent(ice_hw_to_dev(hw), \
@@ -325,6 +326,7 @@ do { \
(qi)->ring.r.ring##_bi[i].pa = 0;\
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
+ } \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index adb8dab765c8..2a3147ee0bbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -135,39 +135,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
}
/**
- * ice_aq_set_lldp_mib - Set the LLDP MIB
- * @hw: pointer to the HW struct
- * @mib_type: Local, Remote or both Local and Remote MIBs
- * @buf: pointer to the caller-supplied buffer to store the MIB block
- * @buf_size: size of the buffer (in bytes)
- * @cd: pointer to command details structure or NULL
- *
- * Set the LLDP MIB. (0x0A08)
- */
-static enum ice_status
-ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.lldp_set_mib;
-
- if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
-
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
- desc.datalen = cpu_to_le16(buf_size);
-
- cmd->type = mib_type;
- cmd->length = cpu_to_le16(buf_size);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
-/**
* ice_get_dcbx_status
* @hw: pointer to the HW struct
*
@@ -1362,7 +1329,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
- struct ice_aqc_get_elem elem;
+ struct ice_aqc_txsched_elem_data elem;
enum ice_status status = 0;
u32 teid1, teid2;
u8 i, j;
@@ -1404,7 +1371,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
/* new TC */
status = ice_sched_query_elem(pi->hw, teid2, &elem);
if (!status)
- status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ status = ice_sched_add_node(pi, 1, &elem);
if (status)
break;
/* update the TC number */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index ee138f9bdc7c..d7e5e6178a21 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -87,7 +87,7 @@
struct ice_lldp_org_tlv {
__be16 typelen;
__be32 ouisubtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[];
} __packed;
struct ice_cee_tlv_hdr {
@@ -109,7 +109,7 @@ struct ice_cee_feat_tlv {
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
u8 subtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[];
};
struct ice_cee_app_prio {
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 979af197f8a3..36abd6b7280c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -444,10 +444,6 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
- /* If DCB was not enabled previously, we are done */
- if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
- return;
-
mutex_lock(&pf->tc_mutex);
if (!pf->hw.port_info->is_sw_lldp)
@@ -467,7 +463,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
}
- dev_info(dev, "DCB restored after reset\n");
+ dev_info(dev, "DCB info restored\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 323238669572..35c21d9ae009 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -53,6 +53,12 @@ ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
tlan_ctx->cgd_num = ring->dcb_tc;
}
+
+static inline bool ice_is_dcb_active(struct ice_pf *pf)
+{
+ return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
+ test_bit(ICE_FLAG_DCB_ENA, pf->flags));
+}
#else
#define ice_dcb_rebuild(pf) do {} while (0)
@@ -95,6 +101,11 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
+static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf)
+{
+ return false;
+}
+
static inline bool
ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
unsigned int __always_unused txqueue)
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index a73d06e06b5d..111d6bfe4222 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
+#include "ice_fw_update.h"
static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
@@ -229,8 +230,61 @@ static int ice_devlink_info_get(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_flash_update - Update firmware stored in flash on the device
+ * @devlink: pointer to devlink associated with device to update
+ * @path: the path of the firmware file to use via request_firmware
+ * @component: name of the component to update, or NULL
+ * @extack: netlink extended ACK structure
+ *
+ * Perform a device flash update. The bulk of the update logic is contained
+ * within the ice_flash_pldm_image function.
+ *
+ * Returns: zero on success, or an error code on failure.
+ */
+static int
+ice_devlink_flash_update(struct devlink *devlink, const char *path,
+ const char *component, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = &pf->pdev->dev;
+ struct ice_hw *hw = &pf->hw;
+ const struct firmware *fw;
+ int err;
+
+ /* individual component update is not yet supported */
+ if (component)
+ return -EOPNOTSUPP;
+
+ if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ err = ice_check_for_pending_update(pf, component, extack);
+ if (err)
+ return err;
+
+ err = request_firmware(&fw, path, dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
+ return err;
+ }
+
+ devlink_flash_update_begin_notify(devlink);
+ devlink_flash_update_status_notify(devlink, "Preparing to flash",
+ component, 0, 0);
+ err = ice_flash_pldm_image(pf, fw, extack);
+ devlink_flash_update_end_notify(devlink);
+
+ release_firmware(fw);
+
+ return err;
+}
+
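[Editor's note] In practice this callback is reached from userspace via the
standard devlink flash command, e.g. "devlink dev flash pci/0000:af:00.0 file
ice.pldm" (bus address and file name are illustrative); the status
notifications above surface as progress output of that command.
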
static const struct devlink_ops ice_devlink_ops = {
.info_get = ice_devlink_info_get,
+ .flash_update = ice_devlink_flash_update,
};
static void ice_devlink_free(void *devlink_ptr)
@@ -303,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
*
* Create and register a devlink_port for this PF. Note that although each
* physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function id.
+ * will still be numbered according to the physical function ID.
*
* Return: zero on success or an error code on failure.
*/
@@ -312,6 +366,7 @@ int ice_devlink_create_port(struct ice_pf *pf)
struct devlink *devlink = priv_to_devlink(pf);
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct device *dev = ice_pf_to_dev(pf);
+ struct devlink_port_attrs attrs = {};
int err;
if (!vsi) {
@@ -319,8 +374,9 @@ int ice_devlink_create_port(struct ice_pf *pf)
return -EIO;
}
- devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- pf->hw.pf_id, false, 0, NULL, 0);
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.pf_id;
+ devlink_port_attrs_set(&pf->devlink_port, &attrs);
err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
if (err) {
dev_err(dev, "devlink_port_register failed: %d\n", err);
@@ -397,12 +453,60 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
+ * the device-caps devlink region. It captures a snapshot of the device
+ * capabilities reported by firmware.
+ *
+ * Returns: zero on success (and updates the data pointer), or a non-zero
+ * error code on failure.
+ */
+static int
+ice_devlink_devcaps_snapshot(struct devlink *devlink,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ void *devcaps;
+
+ devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
+ if (!devcaps)
+ return -ENOMEM;
+
+ status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
+ ice_aqc_opc_list_dev_caps, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
+ vfree(devcaps);
+ return -EIO;
+ }
+
+ *data = (u8 *)devcaps;
+
+ return 0;
+}
+
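[Editor's note] From userspace, a snapshot of this region can be triggered and
read back with, e.g., "devlink region new pci/0000:af:00.0/device-caps
snapshot id 1" followed by "devlink region dump pci/0000:af:00.0/device-caps
snapshot 1" (bus address and snapshot id are illustrative).
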
static const struct devlink_region_ops ice_nvm_region_ops = {
.name = "nvm-flash",
.destructor = vfree,
.snapshot = ice_devlink_nvm_snapshot,
};
+static const struct devlink_region_ops ice_devcaps_region_ops = {
+ .name = "device-caps",
+ .destructor = vfree,
+ .snapshot = ice_devlink_devcaps_snapshot,
+};
+
/**
* ice_devlink_init_regions - Initialize devlink regions
* @pf: the PF device structure
@@ -424,6 +528,15 @@ void ice_devlink_init_regions(struct ice_pf *pf)
PTR_ERR(pf->nvm_region));
pf->nvm_region = NULL;
}
+
+ pf->devcaps_region = devlink_region_create(devlink,
+ &ice_devcaps_region_ops, 10,
+ ICE_AQ_MAX_BUF_LEN);
+ if (IS_ERR(pf->devcaps_region)) {
+ dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
+ PTR_ERR(pf->devcaps_region));
+ pf->devcaps_region = NULL;
+ }
}
/**
@@ -436,4 +549,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
devlink_region_destroy(pf->nvm_region);
+ if (pf->devcaps_region)
+ devlink_region_destroy(pf->devcaps_region);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 68c38004a088..9e8e9531cd87 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped),
ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
ICE_VSI_STAT("tx_linearize", tx_linearize),
+ ICE_VSI_STAT("tx_busy", tx_busy),
+ ICE_VSI_STAT("tx_restart", tx_restart),
};
enum ice_ethtool_test_id {
@@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+ ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
@@ -179,7 +183,6 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
orom = &nvm->orom;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
/* Display NVM version (from which the firmware version can be
* determined) which contains more pertinent information.
@@ -967,12 +970,8 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_aqc_set_phy_cfg_data config = { 0 };
- struct ice_aqc_get_phy_caps_data *caps;
struct ice_vsi *vsi = np->vsi;
- u8 sw_cfg_caps, sw_cfg_fec;
struct ice_port_info *pi;
- enum ice_status status;
- int err = 0;
pi = vsi->port_info;
if (!pi)
@@ -984,54 +983,26 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
return -EOPNOTSUPP;
}
- /* Get last SW configuration */
- caps = kzalloc(sizeof(*caps), GFP_KERNEL);
- if (!caps)
- return -ENOMEM;
-
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
- goto done;
- }
-
- /* Copy SW configuration returned from PHY caps to PHY config */
- ice_copy_phy_caps_to_cfg(caps, &config);
- sw_cfg_caps = caps->caps;
- sw_cfg_fec = caps->link_fec_options;
-
- /* Get toloplogy caps, then copy PHY FEC topoloy caps to PHY config */
- memset(caps, 0, sizeof(*caps));
-
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
- goto done;
- }
-
- config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
- config.link_fec_opt = caps->link_fec_options;
+ /* Proceed only if requesting a different FEC mode */
+ if (pi->phy.curr_user_fec_req == req_fec)
+ return 0;
- ice_cfg_phy_fec(&config, req_fec);
+ /* Copy the current user PHY configuration. The current user PHY
+ * configuration is initialized during probe from the PHY capabilities
+ * (software configuration mode), and updated on each set PHY configuration.
+ */
+ memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));
- /* If FEC mode has changed, then set PHY configuration and enable AN. */
- if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) !=
- (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) ||
- config.link_fec_opt != sw_cfg_fec) {
- if (caps->caps & ICE_AQC_PHY_AN_MODE)
- config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ ice_cfg_phy_fec(pi, &config, req_fec);
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL);
+ if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
+ return -EAGAIN;
- if (status)
- err = -EAGAIN;
- }
+ /* Save requested FEC config */
+ pi->phy.curr_user_fec_req = req_fec;
-done:
- kfree(caps);
- return err;
+ return 0;
}
/**
@@ -1229,6 +1200,17 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
+ /* Do not allow change to link-down-on-close when Total Port Shutdown
+ * is enabled.
+ */
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) &&
+ test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
+ dev_err(dev, "Setting link-down-on-close not supported on this port\n");
+ set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
+ ret = -EINVAL;
+ goto ethtool_exit;
+ }
+
if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
enum ice_status status;
@@ -1316,6 +1298,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
ret = -EAGAIN;
}
+ethtool_exit:
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
@@ -1420,6 +1403,77 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
+#define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
+ ICE_PHY_TYPE_LOW_100M_SGMII)
+
+#define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \
+ ICE_PHY_TYPE_LOW_1000BASE_T | \
+ ICE_PHY_TYPE_LOW_1000BASE_SX | \
+ ICE_PHY_TYPE_LOW_1000BASE_LX | \
+ ICE_PHY_TYPE_LOW_1000BASE_KX | \
+ ICE_PHY_TYPE_LOW_1G_SGMII | \
+ ICE_PHY_TYPE_LOW_2500BASE_T | \
+ ICE_PHY_TYPE_LOW_2500BASE_X | \
+ ICE_PHY_TYPE_LOW_2500BASE_KX | \
+ ICE_PHY_TYPE_LOW_5GBASE_T | \
+ ICE_PHY_TYPE_LOW_5GBASE_KR | \
+ ICE_PHY_TYPE_LOW_10GBASE_T | \
+ ICE_PHY_TYPE_LOW_10G_SFI_DA | \
+ ICE_PHY_TYPE_LOW_10GBASE_SR | \
+ ICE_PHY_TYPE_LOW_10GBASE_LR | \
+ ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
+ ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_10G_SFI_C2C)
+
+#define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
+ ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_100G_CAUI4 | \
+ ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_100G_AUI4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
+ ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
+ ICE_PHY_TYPE_LOW_100GBASE_DR)
+
+#define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
+ ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\
+ ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
+ ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_100G_AUI2)
+
+/**
+ * ice_mask_min_supported_speeds
+ * @phy_types_high: PHY type high
+ * @phy_types_low: PHY type low to apply minimum supported speeds mask
+ *
+ * Apply the minimum supported speeds mask to PHY type low. The result is
+ * the set of speeds reported as ethtool supported link modes.
+ */
+static void
+ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+{
+ /* for a QSFP connection capable of 100G, the minimum supported speed is 25G */
+ if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
+ *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
+ else
+ *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
+}
+
+#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \
+ do { \
+ if (req_speeds & (aq_link_speed) || \
+ (!req_speeds && \
+ (adv_phy_type_lo & phy_type_mask_lo || \
+ adv_phy_type_hi & phy_type_mask_hi))) \
+ ethtool_link_ksettings_add_link_mode(ks, advertising,\
+ ethtool_link_mode); \
+ } while (0)
+
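[Editor's note] The macro deliberately captures req_speeds, adv_phy_type_lo/hi,
and phy_type_mask_lo/hi from the enclosing scope; for a given mask it expands
to roughly:

	if (req_speeds & ICE_AQ_LINK_SPEED_25GB ||
	    (!req_speeds && (adv_phy_type_lo & phy_type_mask_lo ||
			     adv_phy_type_hi & phy_type_mask_hi)))
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     25000baseCR_Full);

That is, a mode is advertised when the user requested that speed, or when no
speed was requested and the (possibly override-masked) PHY types include it.
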
/**
* ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
* @netdev: network interface device structure
@@ -1430,277 +1484,312 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_link_status *hw_link_info;
- bool need_add_adv_mode = false;
struct ice_vsi *vsi = np->vsi;
- u64 phy_types_high;
- u64 phy_types_low;
+ struct ice_pf *pf = vsi->back;
+ u64 phy_type_mask_lo = 0;
+ u64 phy_type_mask_hi = 0;
+ u64 adv_phy_type_lo = 0;
+ u64 adv_phy_type_hi = 0;
+ u64 phy_types_high = 0;
+ u64 phy_types_low = 0;
+ u16 req_speeds;
+
+ req_speeds = vsi->port_info->phy.link_info.req_speeds;
+
+ /* Check if lenient mode is supported and enabled; otherwise the port
+ * operates in strict mode.
+ *
+ * In lenient mode the Supported link modes are the PHY types without
+ * media. The Advertising link modes are either 1. the user-requested
+ * speed, 2. the override PHY mask, or 3. the PHY types with media.
+ *
+ * In strict mode the Supported link modes are the PHY types with media,
+ * and the Advertising link modes are the media PHY types or the speed
+ * requested by the user.
+ */
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
+ struct ice_link_default_override_tlv *ldo;
- hw_link_info = &vsi->port_info->phy.link_info;
- phy_types_low = vsi->port_info->phy.phy_type_low;
- phy_types_high = vsi->port_info->phy.phy_type_high;
+ ldo = &pf->link_dflt_override;
+ phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
+ phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
+
+ ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+
+ /* If override enabled and PHY mask set, then
+ * Advertising link mode is the intersection of the PHY
+ * types without media and the override PHY mask.
+ */
+ if (ldo->options & ICE_LINK_OVERRIDE_EN &&
+ (ldo->phy_type_low || ldo->phy_type_high)) {
+ adv_phy_type_lo =
+ le64_to_cpu(pf->nvm_phy_type_lo) &
+ ldo->phy_type_low;
+ adv_phy_type_hi =
+ le64_to_cpu(pf->nvm_phy_type_hi) &
+ ldo->phy_type_high;
+ }
+ } else {
+ phy_types_low = vsi->port_info->phy.phy_type_low;
+ phy_types_high = vsi->port_info->phy.phy_type_high;
+ }
+
+ /* If Advertising link mode PHY type is not using override PHY type,
+ * then use PHY type with media.
+ */
+ if (!adv_phy_type_lo && !adv_phy_type_hi) {
+ adv_phy_type_lo = vsi->port_info->phy.phy_type_low;
+ adv_phy_type_hi = vsi->port_info->phy.phy_type_high;
+ }
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
- phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX |
+ ICE_PHY_TYPE_LOW_100M_SGMII;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100baseT_Full);
+
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB,
+ 100baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T |
+ ICE_PHY_TYPE_LOW_1G_SGMII;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseT_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+ 1000baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseKX_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+ 1000baseKX_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX |
+ ICE_PHY_TYPE_LOW_1000BASE_LX;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseX_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseX_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+ 1000baseX_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseT_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
+ 2500baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X |
+ ICE_PHY_TYPE_LOW_2500BASE_KX;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseX_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseX_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
+ 2500baseX_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T |
+ ICE_PHY_TYPE_LOW_5GBASE_KR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
5000baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 5000baseT_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA ||
- phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB,
+ 5000baseT_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T |
+ ICE_PHY_TYPE_LOW_10G_SFI_DA |
+ ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC |
+ ICE_PHY_TYPE_LOW_10G_SFI_C2C;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseKR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseKR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseSR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseSR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseLR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseLR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseLR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T |
+ ICE_PHY_TYPE_LOW_25GBASE_CR |
+ ICE_PHY_TYPE_LOW_25GBASE_CR_S |
+ ICE_PHY_TYPE_LOW_25GBASE_CR1 |
+ ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC |
+ ICE_PHY_TYPE_LOW_25G_AUI_C2C;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseCR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+ 25000baseCR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR |
+ ICE_PHY_TYPE_LOW_25GBASE_LR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseSR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+ 25000baseSR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR |
+ ICE_PHY_TYPE_LOW_25GBASE_KR_S |
+ ICE_PHY_TYPE_LOW_25GBASE_KR1;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseKR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+ 25000baseKR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseKR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseKR4_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseKR4_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 |
+ ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC |
+ ICE_PHY_TYPE_LOW_40G_XLAUI;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseCR4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseCR4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseSR4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseSR4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseLR4_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseLR4_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 |
+ ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC |
+ ICE_PHY_TYPE_LOW_50G_LAUI2 |
+ ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC |
+ ICE_PHY_TYPE_LOW_50G_AUI2 |
+ ICE_PHY_TYPE_LOW_50GBASE_CP |
+ ICE_PHY_TYPE_LOW_50GBASE_SR |
+ ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC |
+ ICE_PHY_TYPE_LOW_50G_AUI1;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseCR2_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseCR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+ 50000baseCR2_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 |
+ ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseKR2_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseKR2_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+ 50000baseKR2_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 |
+ ICE_PHY_TYPE_LOW_50GBASE_LR2 |
+ ICE_PHY_TYPE_LOW_50GBASE_FR |
+ ICE_PHY_TYPE_LOW_50GBASE_LR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseSR2_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseSR2_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+ 50000baseSR2_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 |
+ ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC |
+ ICE_PHY_TYPE_LOW_100G_CAUI4 |
+ ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
+ ICE_PHY_TYPE_LOW_100G_AUI4 |
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 |
+ ICE_PHY_TYPE_LOW_100GBASE_CP2;
+ phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
+ ICE_PHY_TYPE_HIGH_100G_CAUI2 |
+ ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
+ ICE_PHY_TYPE_HIGH_100G_AUI2;
+ if (phy_types_low & phy_type_mask_lo ||
+ phy_types_high & phy_type_mask_hi) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseCR4_Full);
}
- if (need_add_adv_mode) {
- need_add_adv_mode = false;
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 |
+ ICE_PHY_TYPE_LOW_100GBASE_SR2;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
- }
- if (need_add_adv_mode) {
- need_add_adv_mode = false;
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseSR4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseSR4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
+ ICE_PHY_TYPE_LOW_100GBASE_DR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
- }
- if (need_add_adv_mode) {
- need_add_adv_mode = false;
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseLR4_ER4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseLR4_ER4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
+ ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
+ phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4;
+ if (phy_types_low & phy_type_mask_lo ||
+ phy_types_high & phy_type_mask_hi) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseKR4_Full);
}
- if (need_add_adv_mode)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
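+
+ /* ice_ethtool_advertise_link_mode() used above is a helper macro
+ * defined earlier in this file, outside this hunk. A simplified
+ * sketch, equivalent to the condition each removed branch open-coded:
+ *
+ *	#define ice_ethtool_advertise_link_mode(aq_link_speed, mode)	\
+ *		if (!hw_link_info->req_speeds ||			\
+ *		    hw_link_info->req_speeds & (aq_link_speed))		\
+ *			ethtool_link_ksettings_add_link_mode(ks,	\
+ *							     advertising, \
+ *							     mode)
+ */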
/* Autoneg PHY types */
if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
@@ -2128,18 +2217,18 @@ static int
ice_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *ks)
{
- u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ethtool_link_ksettings safe_ks, copy_ks;
struct ice_aqc_get_phy_caps_data *abilities;
+ u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
u16 adv_link_speed, curr_link_speed, idx;
struct ice_aqc_set_phy_cfg_data config;
struct ice_pf *pf = np->vsi->back;
struct ice_port_info *p;
u8 autoneg_changed = 0;
enum ice_status status;
- u64 phy_type_high;
- u64 phy_type_low;
+ u64 phy_type_high = 0;
+ u64 phy_type_low = 0;
int err = 0;
bool linkup;
@@ -2163,6 +2252,18 @@ ice_set_link_ksettings(struct net_device *netdev,
p->phy.link_info.link_info & ICE_AQ_LINK_UP)
return -EOPNOTSUPP;
+ abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
+ if (!abilities)
+ return -ENOMEM;
+
+ /* Get the PHY capabilities based on media */
+ status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP,
+ abilities, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
/* copy the ksettings to copy_ks to avoid modifying the original */
memcpy(&copy_ks, ks, sizeof(copy_ks));
@@ -2179,8 +2280,12 @@ ice_set_link_ksettings(struct net_device *netdev,
*/
if (!bitmap_subset(copy_ks.link_modes.advertising,
safe_ks.link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
- return -EINVAL;
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
+ netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
+ err = -EINVAL;
+ goto done;
+ }
/* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(safe_ks));
@@ -2197,33 +2302,27 @@ ice_set_link_ksettings(struct net_device *netdev,
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
*/
- if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base)))
- return -EOPNOTSUPP;
+ if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
timeout--;
- if (!timeout)
- return -EBUSY;
+ if (!timeout) {
+ err = -EBUSY;
+ goto done;
+ }
usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
}
- abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
- if (!abilities)
- return -ENOMEM;
-
- /* Get the current PHY config */
- status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
- NULL);
- if (status) {
- err = -EAGAIN;
- goto done;
- }
+ /* Copy the current user PHY configuration. The current user PHY
+ * configuration is initialized during probe from the PHY capabilities
+ * software configuration report, and updated on each set PHY
+ * configuration.
+ */
+ memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config));
- /* Copy abilities to config in case autoneg is not set below */
- memset(&config, 0, sizeof(config));
- config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
- if (abilities->caps & ICE_AQC_PHY_AN_MODE)
- config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Check autoneg */
err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
@@ -2258,29 +2357,44 @@ ice_set_link_ksettings(struct net_device *netdev,
goto done;
}
- /* copy over the rest of the abilities */
- config.low_power_ctrl = abilities->low_power_ctrl;
- config.eee_cap = abilities->eee_cap;
- config.eeer_value = abilities->eeer_value;
- config.link_fec_opt = abilities->link_fec_options;
-
/* save the requested speeds */
p->phy.link_info.req_speeds = adv_link_speed;
/* set link and auto negotiation so changes take effect */
config.caps |= ICE_AQ_PHY_ENA_LINK;
- if (phy_type_low || phy_type_high) {
- config.phy_type_high = cpu_to_le64(phy_type_high) &
- abilities->phy_type_high;
- config.phy_type_low = cpu_to_le64(phy_type_low) &
- abilities->phy_type_low;
- } else {
+ /* check if there is a PHY type for the requested advertised speed */
+ if (!(phy_type_low || phy_type_high)) {
+ netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
err = -EAGAIN;
- netdev_info(netdev, "Nothing changed. No PHY_TYPE is corresponded to advertised link speed.\n");
goto done;
}
+ /* intersect requested advertised speed PHY types with media PHY types
+ * for set PHY configuration
+ */
+ config.phy_type_high = cpu_to_le64(phy_type_high) &
+ abilities->phy_type_high;
+ config.phy_type_low = cpu_to_le64(phy_type_low) &
+ abilities->phy_type_low;
+
+ if (!(config.phy_type_high || config.phy_type_low)) {
+ /* If there is no intersection and lenient mode is enabled, then
+ * intersect the requested advertised speed with NVM media type
+ * PHY types.
+ */
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
+ config.phy_type_high = cpu_to_le64(phy_type_high) &
+ pf->nvm_phy_type_hi;
+ config.phy_type_low = cpu_to_le64(phy_type_low) &
+ pf->nvm_phy_type_lo;
+ } else {
+ netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
+ err = -EAGAIN;
+ goto done;
+ }
+ }
+
/* If link is up put link down */
if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
/* Tell the OS link is going down, the link will go
@@ -2292,12 +2406,15 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* make the aq call */
- status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL);
+ status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL);
if (status) {
netdev_info(netdev, "Set phy config failed,\n");
err = -EAGAIN;
+ goto done;
}
+ /* Save speed request */
+ p->phy.curr_user_speed_req = adv_link_speed;
done:
kfree(abilities);
clear_bit(__ICE_CFG_BUSY, pf->state);
@@ -2874,8 +2991,8 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
if (status)
goto out;
- pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
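+
+ /* ice_is_phy_caps_an_enabled() is a new helper in ice_common.c, not
+ * shown in this diff: autoneg is reported enabled when any PHY
+ * capability autoneg bit is set, of which ICE_AQC_PHY_AN_MODE (the
+ * bit this code previously tested directly) is one.
+ */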
if (dcbx_cfg->pfc.pfcena)
/* PFC enabled so report LFC as off */
@@ -2943,8 +3060,8 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EIO;
}
- is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
kfree(pcaps);
@@ -3323,6 +3440,58 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return 0;
}
+/**
+ * ice_get_wol - get current Wake on LAN configuration
+ * @netdev: network interface device structure
+ * @wol: Ethtool structure to retrieve WoL settings
+ */
+static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ if (np->vsi->type != ICE_VSI_PF)
+ netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
+
+ /* Get WoL settings based on the HW capability */
+ if (ice_is_wol_supported(pf)) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ }
+}
+
+/**
+ * ice_set_wol - set Wake on LAN on supported device
+ * @netdev: network interface device structure
+ * @wol: Ethtool structure to set WoL
+ */
+static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts && wol->wolopts != WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ /* Set WoL only if there is a new value */
+ if (pf->wol_ena != !!wol->wolopts) {
+ pf->wol_ena = !!wol->wolopts;
+ device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena);
+ netdev_dbg(netdev, "WoL magic packet %sabled\n",
+ pf->wol_ena ? "en" : "dis");
+ }
+
+ return 0;
+}
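+
+/* Usage note: once get_wol/set_wol are wired into the ethtool_ops tables
+ * below, WoL is driven from userspace with ethtool, e.g.
+ * "ethtool -s <iface> wol g" to request magic-packet wake and
+ * "ethtool <iface>" to read the setting back.
+ */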
+
enum ice_container_type {
ICE_RX_CONTAINER,
ICE_TX_CONTAINER,
@@ -3806,6 +3975,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
+ .get_wol = ice_get_wol,
+ .set_wol = ice_set_wol,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
@@ -3848,6 +4019,8 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
+ .get_wol = ice_get_wol,
+ .set_wol = ice_set_wol,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 4420fc02f7e7..b17ae3e20157 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* This function generates a key from a value, a don't care mask and a never
* match mask.
* upd, dc, and nm are optional parameters, and can be NULL:
- * upd == NULL --> udp mask is all 1's (update all bits)
+ * upd == NULL --> upd mask is all 1's (update all bits)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
@@ -1121,8 +1121,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
u16 size;
u32 i;
- size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
- (ICE_PKG_CNT - 1));
+ size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
return ICE_ERR_NO_MEMORY;
@@ -1180,7 +1179,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
u32 seg_count;
u32 i;
- if (len < sizeof(*pkg))
+ if (len < struct_size(pkg, seg_offset, 1))
return ICE_ERR_BUF_TOO_SHORT;
if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
@@ -1195,7 +1194,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
return ICE_ERR_CFG;
/* make sure segment array fits in package length */
- if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+ if (len < struct_size(pkg, seg_offset, seg_count))
return ICE_ERR_BUF_TOO_SHORT;
/* all segments must fit within length */
@@ -1300,7 +1299,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
}
/* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = kzalloc(size, GFP_KERNEL);
if (!pkg)
return ICE_ERR_NO_MEMORY;
@@ -1764,13 +1763,13 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
goto ice_create_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- sizeof(*sect_rx));
+ struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_create_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- sizeof(*sect_tx));
+ struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_create_tunnel_err;
sect_tx->count = cpu_to_le16(1);
@@ -1847,7 +1846,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
}
/* size of section - there is at least one entry */
- size = struct_size(sect_rx, tcam, count - 1);
+ size = struct_size(sect_rx, tcam, count);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
@@ -2922,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
ICE_FLOW_ENTRY_HNDL(e));
list_del(&p->l_entry);
+
+ mutex_destroy(&p->entries_lock);
devm_kfree(ice_hw_to_dev(hw), p);
}
mutex_unlock(&hw->fl_profs_locks[blk_idx]);
@@ -3039,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(prof_redir->t, 0,
prof_redir->count * sizeof(*prof_redir->t));
- memset(es->t, 0, es->count * sizeof(*es->t));
+ memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
}
@@ -3150,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->ref_count),
GFP_KERNEL);
+ if (!es->ref_count)
+ goto err;
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
- if (!es->ref_count)
+ if (!es->written)
goto err;
}
return 0;
@@ -3324,10 +3327,10 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
u32 id;
id = ice_sect_id(blk, ICE_VEC_TBL);
- p = (struct ice_pkg_es *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
- vec_size -
- sizeof(p->es[0]));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, es, 1) +
+ vec_size -
+ sizeof(p->es[0]));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3360,8 +3363,8 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
u32 id;
id = ice_sect_id(blk, ICE_PROF_TCAM);
- p = (struct ice_prof_id_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, entry, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3396,8 +3399,8 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
u32 id;
id = ice_sect_id(blk, ICE_XLT1);
- p = (struct ice_xlt1_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, value, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3431,8 +3434,8 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
case ICE_VSI_MOVE:
case ICE_VSIG_REM:
id = ice_sect_id(blk, ICE_XLT2);
- p = (struct ice_xlt2_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, value, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3875,16 +3878,16 @@ err_ice_add_prof:
}
/**
- * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * ice_search_prof_id - Search for a profile tracking ID
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
*
- * This will search for a profile tracking ID which was previously added. This
- * version assumes that the caller has already acquired the prof map lock.
+ * This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
*/
static struct ice_prof_map *
-ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
struct ice_prof_map *map;
@@ -3899,26 +3902,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
}
/**
- * ice_search_prof_id - Search for a profile tracking ID
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- *
- * This will search for a profile tracking ID which was previously added.
- */
-static struct ice_prof_map *
-ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
-{
- struct ice_prof_map *entry;
-
- mutex_lock(&hw->blk[blk].es.prof_map_lock);
- entry = ice_search_prof_id_low(hw, blk, id);
- mutex_unlock(&hw->blk[blk].es.prof_map_lock);
-
- return entry;
-}
-
-/**
* ice_vsig_prof_id_count - count profiles in a VSIG
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -4134,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
mutex_lock(&hw->blk[blk].es.prof_map_lock);
- pmap = ice_search_prof_id_low(hw, blk, id);
+ pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
status = ICE_ERR_DOES_NOT_EXIST;
goto err_ice_rem_prof;
@@ -4167,22 +4150,28 @@ static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_get_prof;
+ }
for (i = 0; i < map->ptg_cnt; i++)
if (!hw->blk[blk].es.written[map->prof_id]) {
/* add ES to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_get_prof;
+ }
p->type = ICE_PTG_ES_ADD;
p->ptype = 0;
@@ -4197,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
list_add(&p->list_entry, chg);
}
- return 0;
-
err_ice_get_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
@@ -4255,17 +4243,23 @@ static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_to_lst;
+ }
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
- return ICE_ERR_NO_MEMORY;
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof_to_lst;
+ }
p->profile_cookie = map->profile_cookie;
p->prof_id = map->prof_id;
@@ -4279,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
list_add(&p->list, lst);
- return 0;
+err_ice_add_prof_to_lst:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
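+
+/* Locking sketch for the refactored lookup: ice_search_prof_id() no
+ * longer takes the lock itself, so every caller now follows this pattern
+ * around the lookup and any use of the returned entry:
+ *
+ *	mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ *	map = ice_search_prof_id(hw, blk, hdl);
+ *	if (map)
+ *		... use map while the lock is held ...
+ *	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ */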
/**
@@ -4497,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
- /* Get the details on the profile specified by the handle ID */
- map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
-
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
return ICE_ERR_ALREADY_EXISTS;
@@ -4516,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
if (!t)
return ICE_ERR_NO_MEMORY;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_id_vsig;
+ }
+
t->profile_cookie = map->profile_cookie;
t->prof_id = map->prof_id;
t->tcam_count = map->ptg_cnt;
/* create TCAM entries */
for (i = 0; i < map->ptg_cnt; i++) {
- enum ice_status status;
u16 tcam_idx;
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof_id_vsig;
+ }
/* allocate the TCAM entry index */
status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
@@ -4572,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
list_add(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
- return 0;
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
err_ice_add_prof_id_vsig:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
devm_kfree(ice_hw_to_dev(hw), t);
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index a6f391eac8ff..c1c99a267a98 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -22,7 +22,7 @@ struct ice_fv {
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
- __le32 seg_offset[1];
+ __le32 seg_offset[];
};
/* generic segment */
@@ -53,12 +53,12 @@ struct ice_device_id_entry {
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
- struct ice_device_id_entry device_table[1];
+ struct ice_device_id_entry device_table[];
};
struct ice_nvm_table {
__le32 table_count;
- __le32 vers[1];
+ __le32 vers[];
};
struct ice_buf {
@@ -68,7 +68,7 @@ struct ice_buf {
struct ice_buf_table {
__le32 buf_count;
- struct ice_buf buf_array[1];
+ struct ice_buf buf_array[];
};
/* global metadata specific segment */
@@ -101,11 +101,12 @@ struct ice_section_entry {
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
- struct ice_section_entry section_entry[1];
+ struct ice_section_entry section_entry[];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+ struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
+ (ent_sz))
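+
+/* Sketch of the struct_size() idiom these [1]-to-[] conversions rely on:
+ * for a struct ending in a flexible array member, struct_size(p, m, n)
+ * computes sizeof(*p) + n * sizeof(p->m[0]), saturating on overflow.
+ * E.g. sizing a buffer header with `count` section entries:
+ *
+ *	struct ice_buf_hdr *hdr;
+ *
+ *	hdr = kzalloc(struct_size(hdr, section_entry, count), GFP_KERNEL);
+ */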
/* ice package section IDs */
#define ICE_SID_XLT0_SW 10
@@ -198,17 +199,17 @@ struct ice_label {
struct ice_label_section {
__le16 count;
- struct ice_label label[1];
+ struct ice_label label[];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_label_section) - sizeof(struct ice_label), \
- sizeof(struct ice_label))
+ struct_size((struct ice_label_section *)0, label, 1) - \
+ sizeof(struct ice_label), sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
- struct ice_fv fv[1];
+ struct ice_fv fv[];
};
/* The BOOST TCAM stores the match packet header in reverse order, meaning
@@ -245,30 +246,30 @@ struct ice_boost_tcam_entry {
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
- struct ice_boost_tcam_entry tcam[1];
+ struct ice_boost_tcam_entry tcam[];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_boost_tcam_section) - \
+ struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
struct ice_xlt1_section {
__le16 count;
__le16 offset;
- u8 value[1];
-} __packed;
+ u8 value[];
+};
struct ice_xlt2_section {
__le16 count;
__le16 offset;
- __le16 value[1];
+ __le16 value[];
};
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
- u8 redir_value[1];
+ u8 redir_value[];
};
/* package buffer building */
@@ -327,7 +328,7 @@ struct ice_tunnel_table {
struct ice_pkg_es {
__le16 count;
__le16 offset;
- struct ice_fv_word es[1];
+ struct ice_fv_word es[];
};
struct ice_es {
@@ -461,8 +462,8 @@ struct ice_prof_tcam_entry {
struct ice_prof_id_section {
__le16 count;
- struct ice_prof_tcam_entry entry[1];
-} __packed;
+ struct ice_prof_tcam_entry entry[];
+};
struct ice_prof_tcam {
u32 sid;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d74e5290677f..fe677621dd51 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
if (list_empty(&hw->fl_profs[blk]))
return 0;
- mutex_lock(&hw->fl_profs_locks[blk]);
+ mutex_lock(&hw->rss_locks);
list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
if (test_bit(vsi_handle, p->vsis)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
@@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
break;
if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
- status = ice_flow_rem_prof_sync(hw, blk, p);
+ status = ice_flow_rem_prof(hw, blk, p->id);
if (status)
break;
}
}
- mutex_unlock(&hw->fl_profs_locks[blk]);
+ mutex_unlock(&hw->rss_locks);
return status;
}
@@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
*/
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
- struct ice_rss_cfg *r, *rss_cfg = NULL;
+ u64 rss_hash = ICE_HASH_INVALID;
+ struct ice_rss_cfg *r;
/* verify if the protocol header is non zero and VSI is valid */
if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
@@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
r->packet_hdr == hdrs) {
- rss_cfg = r;
+ rss_hash = r->hashed_flds;
break;
}
mutex_unlock(&hw->rss_locks);
- return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+ return rss_hash;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
new file mode 100644
index 000000000000..deaefe00c9c0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#include <asm/unaligned.h>
+#include <linux/uuid.h>
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include "ice.h"
+#include "ice_fw_update.h"
+
+struct ice_fwu_priv {
+ struct pldmfw context;
+
+ struct ice_pf *pf;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+};
+
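+/* Each pldmfw callback receives the embedded context and recovers the
+ * wrapping private structure via container_of(), since `context` is a
+ * member of struct ice_fwu_priv:
+ *
+ *	struct ice_fwu_priv *priv =
+ *		container_of(context, struct ice_fwu_priv, context);
+ */
+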
+/**
+ * ice_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct device *dev = context->dev;
+ struct ice_pf *pf = priv->pf;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 *package_data;
+
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ status = ice_nvm_set_pkg_data(hw, false, package_data, length, NULL);
+
+ kfree(package_data);
+
+ if (status) {
+ dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_check_component_response - Report firmware response to a component
+ * @pf: device private data structure
+ * @id: component id being checked
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates if this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Returns: zero if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int
+ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const char *component;
+
+ switch (id) {
+ case NVM_COMP_ID_OROM:
+ component = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ component = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ component = "fw.netlist";
+ break;
+ default:
+ WARN(1, "Unexpected unknown component identifier 0x%02x", id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s: firmware response 0x%x, code 0x%x\n",
+ component, response, code);
+
+ switch (response) {
+ case ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* firmware indicated this update is good to proceed */
+ return 0;
+ case ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ dev_warn(dev, "firmware recommends not updating %s, as it may result in a downgrade. continuing anyways\n", component);
+ return 0;
+ case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ dev_info(dev, "firmware has rejected updating %s\n", component);
+ break;
+ }
+
+ switch (code) {
+ case ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ dev_err(dev, "Component comparison stamp for %s is identical to the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is identical to running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_STAMP_LOWER:
+ dev_err(dev, "Component comparison stamp for %s is lower than the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is lower than running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ dev_err(dev, "Component comparison stamp for %s is invalid\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is invalid");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE:
+ dev_err(dev, "%s conflicts with a previous component table\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component table conflict occurred");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ dev_err(dev, "Pre-requisites for component %s have not been met\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ dev_err(dev, "%s is not a supported component\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ dev_err(dev, "Security restrictions prevent %s from being downgraded\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ dev_err(dev, "Received an incomplete component image for %s\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ dev_err(dev, "Component version for %s is identical to the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component version is identical to running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ dev_err(dev, "Component version for %s is lower than the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component version is lower than the running image");
+ break;
+ default:
+ dev_err(dev, "Unexpected response code 0x02%x for %s\n",
+ code, component);
+ NL_SET_ERR_MSG_MOD(extack, "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ice_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends AdminQ commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_send_component_table(struct pldmfw *context, struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_aqc_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct device *dev = context->dev;
+ struct ice_pf *pf = priv->pf;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ size_t length;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ dev_err(dev, "Unable to update due to a firmware component with unknown ID %u\n",
+ component->identifier);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+ memcpy(comp_tbl->cvs, component->version_string, component->version_len);
+
+ status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code, NULL);
+
+ kfree(comp_tbl);
+
+ if (status) {
+ dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ice_check_component_response(pf, component->identifier, comp_response,
+ comp_response_code, extack);
+}
+
+/**
+ * ice_write_one_nvm_block - Write an NVM block and await completion response
+ * @pf: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module, and wait for the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ u16 completion_module, completion_retval;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 completion_offset;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ status = ice_aq_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0, NULL);
+ if (status) {
+ dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n",
+ module, offset, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
+ return -EIO;
+ }
+
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
+ module, err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ return -EIO;
+ }
+
+ completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(event.desc.retval);
+
+ completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low);
+ completion_offset |= event.desc.params.nvm.offset_high << 16;
+
+ if (completion_module != module) {
+ dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
+ completion_module, module);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ return -EIO;
+ }
+
+ if (completion_offset != offset) {
+ dev_err(dev, "Unexpected offset in write completion: got %u, expected %u\n",
+ completion_offset, offset);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ return -EIO;
+ }
+
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n",
+ module, offset,
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_nvm_module - Write data to an NVM module
+ * @pf: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 Kb
+ * blocks. Notify devlink core of progress after each block is programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
+ const u8 *image, u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink = priv_to_devlink(pf);
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
+ block_size = min_t(u32, ICE_AQ_MAX_BUF_LEN, length - offset);
+ last_cmd = !(offset + block_size < length);
+
+ /* ice_aq_update_nvm may copy the firmware response into the
+ * buffer, so we must make a copy since the source data is
+ * constant.
+ */
+ memcpy(block, image + offset, block_size);
+
+ err = ice_write_one_nvm_block(pf, module, offset, block_size,
+ block, last_cmd, extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+ return err;
+}
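+
+/* Usage sketch: flashing the main NVM bank for the "fw.mgmt" component
+ * (this is exactly what ice_flash_component() below does for
+ * NVM_COMP_ID_NVM):
+ *
+ *	err = ice_write_nvm_module(pf, ICE_SR_1ST_NVM_BANK_PTR, "fw.mgmt",
+ *				   component->component_data,
+ *				   component->component_size, extack);
+ */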
+
+/**
+ * ice_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @pf: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module, and wait for
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ u16 completion_module, completion_retval;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ struct devlink *devlink;
+ enum ice_status status;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ devlink = priv_to_devlink(pf);
+
+ devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0);
+
+ status = ice_aq_erase_nvm(hw, module, NULL);
+ if (status) {
+ dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
+ component, module, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+ /* Yes, this really can take minutes to complete */
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
+ component, module, err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ goto out_notify_devlink;
+ }
+
+ completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(event.desc.retval);
+
+ if (completion_module != module) {
+ dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n",
+ component, completion_module, module);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ component, module,
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+out_notify_devlink:
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ice_switch_flash_banks - Tell firmware to switch NVM banks
+ * @pf: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Returns: zero on success or an error code on failure.
+ */
+static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u16 completion_retval;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ status = ice_nvm_write_activate(hw, activate_flags);
+ if (status) {
+ dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
+ return -EIO;
+ }
+
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, HZ,
+ &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
+ err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ return err;
+ }
+
+ completion_retval = le16_to_cpu(event.desc.retval);
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks");
+ return -EIO;
+ }
+
+ return 0;
+}
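+
+/* Callers accumulate one ICE_AQC_NVM_ACTIV_SEL_* bit per component that
+ * was written (see priv->activate_flags), then activate them all with a
+ * single command, e.g.:
+ *
+ *	u8 flags = ICE_AQC_NVM_ACTIV_SEL_NVM | ICE_AQC_NVM_ACTIV_SEL_OROM;
+ *
+ *	err = ice_switch_flash_banks(pf, flags, extack);
+ */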
+
+/**
+ * ice_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_pf *pf = priv->pf;
+ const char *name;
+ u16 module;
+ u8 flag;
+ int err;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = ICE_SR_1ST_OROM_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = ICE_SR_1ST_NVM_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = ICE_SR_NETLIST_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+ default:
+ /* This should not trigger, since we check the id before
+ * sending the component table to firmware.
+ */
+ WARN(1, "Unexpected unknown component identifier 0x%02x",
+ component->identifier);
+ return -EINVAL;
+ }
+
+ /* Mark this component for activating at the end */
+ priv->activate_flags |= flag;
+
+ err = ice_erase_nvm_module(pf, module, name, extack);
+ if (err)
+ return err;
+
+ return ice_write_nvm_module(pf, module, name, component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ice_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch the active flash banks.
+ *
+ * Returns: 0 on success, or an error code on failure.
+ */
+static int ice_finalize_update(struct pldmfw *context)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_pf *pf = priv->pf;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct pldmfw_ops ice_fwu_ops = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
+/**
+ * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @pf: private device driver structure
+ * @fw: firmware object pointing to the relevant firmware file
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Returns: zero on success or a negative error code on failure.
+ */
+int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fwu_priv priv;
+ enum ice_status status;
+ int err;
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ice_fwu_ops;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.pf = pf;
+ priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL;
+
+ status = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (status) {
+ dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, fw);
+
+ ice_release_nvm(hw);
+
+ return err;
+}
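+
+/* Hypothetical caller sketch -- the actual devlink glue lives in
+ * ice_devlink.c, outside this file: the .flash_update callback requests
+ * the firmware file and hands it to ice_flash_pldm_image():
+ *
+ *	const struct firmware *fw;
+ *	int err;
+ *
+ *	err = request_firmware(&fw, file_name, dev);
+ *	if (err)
+ *		return err;
+ *	err = ice_flash_pldm_image(pf, fw, extack);
+ *	release_firmware(fw);
+ *	return err;
+ */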
+
+/**
+ * ice_check_for_pending_update - Check for a pending flash update
+ * @pf: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Check whether the device already has a pending flash update. If such an
+ * update is found, cancel it so that the requested update may proceed.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw_dev_caps *dev_caps;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 pending = 0;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ /* Read the most recent device capabilities from firmware. Do not use
+ * the cached values in hw->dev_caps, because the pending update flag
+ * may have changed, e.g. if an update was previously completed and
+ * the system has not yet rebooted.
+ */
+ status = ice_discover_dev_caps(hw, dev_caps);
+ if (status) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm) {
+ dev_info(dev, "The fw.mgmt flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_orom) {
+ dev_info(dev, "The fw.undi flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist) {
+ dev_info(dev, "The fw.netlist flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ }
+
+ kfree(dev_caps);
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ else
+ WARN(1, "Unexpected flash component %s", component);
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ status = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (status) {
+ dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
+ err = ice_switch_flash_banks(pf, pending, extack);
+
+ ice_release_nvm(hw);
+
+ return err;
+}
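+
+/* Usage sketch (illustrative, not part of this patch): a devlink
+ * .flash_update handler would be expected to tie these two helpers
+ * together, first canceling any pending update and then flashing the new
+ * image. The request_firmware() call and the file_name parameter are
+ * assumptions for illustration only:
+ *
+ *	const struct firmware *fw;
+ *	int err;
+ *
+ *	err = ice_check_for_pending_update(pf, NULL, extack);
+ *	if (err)
+ *		return err;
+ *
+ *	err = request_firmware(&fw, file_name, ice_pf_to_dev(pf));
+ *	if (err)
+ *		return err;
+ *
+ *	err = ice_flash_pldm_image(pf, fw, extack);
+ *	release_firmware(fw);
+ *	return err;
+ */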
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
new file mode 100644
index 000000000000..79472cc618b4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _ICE_FW_UPDATE_H_
+#define _ICE_FW_UPDATE_H_
+
+int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack);
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1086c9f778b4..90abc8612a6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -57,7 +57,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
-#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
+#define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -362,13 +362,22 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PRTRPB_RDPC 0x000AC260
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
+#define PFPM_APM 0x000B8080
+#define PFPM_APM_APME_M BIT(0)
+#define PFPM_WUFC 0x0009DC00
+#define PFPM_WUFC_MAG_M BIT(1)
+#define PFPM_WUS 0x0009DB80
+#define PFPM_WUS_LNKC_M BIT(0)
+#define PFPM_WUS_MAG_M BIT(1)
+#define PFPM_WUS_MNG_M BIT(3)
+#define PFPM_WUS_FW_RST_WK_M BIT(31)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
-#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 14dfbbc1b2cf..4ec24c3e813f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -601,6 +601,7 @@ struct ice_tlan_ctx {
/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
+#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
@@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(3),
+ ICE_PTT_UNUSED_ENTRY(4),
+ ICE_PTT_UNUSED_ENTRY(5),
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(8),
+ ICE_PTT_UNUSED_ENTRY(9),
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(12),
+ ICE_PTT_UNUSED_ENTRY(13),
+ ICE_PTT_UNUSED_ENTRY(14),
+ ICE_PTT_UNUSED_ENTRY(15),
+ ICE_PTT_UNUSED_ENTRY(16),
+ ICE_PTT_UNUSED_ENTRY(17),
+ ICE_PTT_UNUSED_ENTRY(18),
+ ICE_PTT_UNUSED_ENTRY(19),
+ ICE_PTT_UNUSED_ENTRY(20),
+ ICE_PTT_UNUSED_ENTRY(21),
+
+ /* Non Tunneled IPv4 */
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(25),
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(32),
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(39),
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(47),
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(54),
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(62),
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(69),
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(77),
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(84),
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ ICE_PTT_UNUSED_ENTRY(91),
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(98),
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(105),
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(113),
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(120),
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(128),
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(135),
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(143),
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(150),
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ ICE_PTT_UNUSED_ENTRY(154),
+ ICE_PTT_UNUSED_ENTRY(155),
+ ICE_PTT_UNUSED_ENTRY(156),
+ ICE_PTT_UNUSED_ENTRY(157),
+ ICE_PTT_UNUSED_ENTRY(158),
+ ICE_PTT_UNUSED_ENTRY(159),
+
+ ICE_PTT_UNUSED_ENTRY(160),
+ ICE_PTT_UNUSED_ENTRY(161),
+ ICE_PTT_UNUSED_ENTRY(162),
+ ICE_PTT_UNUSED_ENTRY(163),
+ ICE_PTT_UNUSED_ENTRY(164),
+ ICE_PTT_UNUSED_ENTRY(165),
+ ICE_PTT_UNUSED_ENTRY(166),
+ ICE_PTT_UNUSED_ENTRY(167),
+ ICE_PTT_UNUSED_ENTRY(168),
+ ICE_PTT_UNUSED_ENTRY(169),
+
+ ICE_PTT_UNUSED_ENTRY(170),
+ ICE_PTT_UNUSED_ENTRY(171),
+ ICE_PTT_UNUSED_ENTRY(172),
+ ICE_PTT_UNUSED_ENTRY(173),
+ ICE_PTT_UNUSED_ENTRY(174),
+ ICE_PTT_UNUSED_ENTRY(175),
+ ICE_PTT_UNUSED_ENTRY(176),
+ ICE_PTT_UNUSED_ENTRY(177),
+ ICE_PTT_UNUSED_ENTRY(178),
+ ICE_PTT_UNUSED_ENTRY(179),
+
+ ICE_PTT_UNUSED_ENTRY(180),
+ ICE_PTT_UNUSED_ENTRY(181),
+ ICE_PTT_UNUSED_ENTRY(182),
+ ICE_PTT_UNUSED_ENTRY(183),
+ ICE_PTT_UNUSED_ENTRY(184),
+ ICE_PTT_UNUSED_ENTRY(185),
+ ICE_PTT_UNUSED_ENTRY(186),
+ ICE_PTT_UNUSED_ENTRY(187),
+ ICE_PTT_UNUSED_ENTRY(188),
+ ICE_PTT_UNUSED_ENTRY(189),
+
+ ICE_PTT_UNUSED_ENTRY(190),
+ ICE_PTT_UNUSED_ENTRY(191),
+ ICE_PTT_UNUSED_ENTRY(192),
+ ICE_PTT_UNUSED_ENTRY(193),
+ ICE_PTT_UNUSED_ENTRY(194),
+ ICE_PTT_UNUSED_ENTRY(195),
+ ICE_PTT_UNUSED_ENTRY(196),
+ ICE_PTT_UNUSED_ENTRY(197),
+ ICE_PTT_UNUSED_ENTRY(198),
+ ICE_PTT_UNUSED_ENTRY(199),
+
+ ICE_PTT_UNUSED_ENTRY(200),
+ ICE_PTT_UNUSED_ENTRY(201),
+ ICE_PTT_UNUSED_ENTRY(202),
+ ICE_PTT_UNUSED_ENTRY(203),
+ ICE_PTT_UNUSED_ENTRY(204),
+ ICE_PTT_UNUSED_ENTRY(205),
+ ICE_PTT_UNUSED_ENTRY(206),
+ ICE_PTT_UNUSED_ENTRY(207),
+ ICE_PTT_UNUSED_ENTRY(208),
+ ICE_PTT_UNUSED_ENTRY(209),
+
+ ICE_PTT_UNUSED_ENTRY(210),
+ ICE_PTT_UNUSED_ENTRY(211),
+ ICE_PTT_UNUSED_ENTRY(212),
+ ICE_PTT_UNUSED_ENTRY(213),
+ ICE_PTT_UNUSED_ENTRY(214),
+ ICE_PTT_UNUSED_ENTRY(215),
+ ICE_PTT_UNUSED_ENTRY(216),
+ ICE_PTT_UNUSED_ENTRY(217),
+ ICE_PTT_UNUSED_ENTRY(218),
+ ICE_PTT_UNUSED_ENTRY(219),
+
+ ICE_PTT_UNUSED_ENTRY(220),
+ ICE_PTT_UNUSED_ENTRY(221),
+ ICE_PTT_UNUSED_ENTRY(222),
+ ICE_PTT_UNUSED_ENTRY(223),
+ ICE_PTT_UNUSED_ENTRY(224),
+ ICE_PTT_UNUSED_ENTRY(225),
+ ICE_PTT_UNUSED_ENTRY(226),
+ ICE_PTT_UNUSED_ENTRY(227),
+ ICE_PTT_UNUSED_ENTRY(228),
+ ICE_PTT_UNUSED_ENTRY(229),
+
+ ICE_PTT_UNUSED_ENTRY(230),
+ ICE_PTT_UNUSED_ENTRY(231),
+ ICE_PTT_UNUSED_ENTRY(232),
+ ICE_PTT_UNUSED_ENTRY(233),
+ ICE_PTT_UNUSED_ENTRY(234),
+ ICE_PTT_UNUSED_ENTRY(235),
+ ICE_PTT_UNUSED_ENTRY(236),
+ ICE_PTT_UNUSED_ENTRY(237),
+ ICE_PTT_UNUSED_ENTRY(238),
+ ICE_PTT_UNUSED_ENTRY(239),
+
+ ICE_PTT_UNUSED_ENTRY(240),
+ ICE_PTT_UNUSED_ENTRY(241),
+ ICE_PTT_UNUSED_ENTRY(242),
+ ICE_PTT_UNUSED_ENTRY(243),
+ ICE_PTT_UNUSED_ENTRY(244),
+ ICE_PTT_UNUSED_ENTRY(245),
+ ICE_PTT_UNUSED_ENTRY(246),
+ ICE_PTT_UNUSED_ENTRY(247),
+ ICE_PTT_UNUSED_ENTRY(248),
+ ICE_PTT_UNUSED_ENTRY(249),
+
+ ICE_PTT_UNUSED_ENTRY(250),
+ ICE_PTT_UNUSED_ENTRY(251),
+ ICE_PTT_UNUSED_ENTRY(252),
+ ICE_PTT_UNUSED_ENTRY(253),
+ ICE_PTT_UNUSED_ENTRY(254),
+ ICE_PTT_UNUSED_ENTRY(255),
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 2e3a39cea2c0..f2682776f8c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -127,8 +127,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
case ICE_VSI_PF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
- vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
- vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ /* a user could change the values of num_[tr]x_desc using
+ * ethtool -G so we should keep those values instead of
+ * overwriting them with the defaults.
+ */
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
@@ -1468,6 +1474,30 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to PF to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation for
+ * driver functionality.
+ *
+ * Returns true if PF is in a nominal state, false otherwise
+ */
+bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+ DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+ if (!pf)
+ return false;
+
+ bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ return false;
+
+ return true;
+}
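+
+/* Usage sketch (illustrative, not part of this patch): callers that must
+ * not race with resets or other exceptional PF states can use this helper
+ * as a simple guard before servicing a request, e.g.:
+ *
+ *	if (!ice_pf_state_is_nominal(pf)) {
+ *		dev_err(ice_pf_to_dev(pf), "PF not ready\n");
+ *		return -EBUSY;
+ *	}
+ */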
+
+/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
*/
@@ -1667,7 +1697,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
u16 q_idx = 0;
int err = 0;
- qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
@@ -1987,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
if (!vsi)
return -EINVAL;
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
pf = vsi->back;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index d80e6afa4511..981f3a156c24 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -8,6 +8,8 @@
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
+bool ice_pf_state_is_nominal(struct ice_pf *pf);
+
void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4cbd49c87568..8437d72795b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
@@ -13,15 +14,7 @@
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
-#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 2
-
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
- __stringify(DRV_VERSION_MINOR) "." \
- __stringify(DRV_VERSION_BUILD) "-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
-const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
@@ -32,7 +25,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
static int debug = -1;
@@ -377,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
+ ice_cfg_vlan_pruning(vsi, false, false);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -389,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
+ if (vsi->num_vlan > 1)
+ ice_cfg_vlan_pruning(vsi, true, false);
}
}
}
@@ -620,6 +615,7 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
struct ice_aqc_get_phy_caps_data *caps;
+ const char *an_advertised;
enum ice_status status;
const char *fec_req;
const char *speed;
@@ -718,6 +714,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps) {
fec_req = "Unknown";
+ an_advertised = "Unknown";
goto done;
}
@@ -726,6 +723,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
if (status)
netdev_info(vsi->netdev, "Get phy capability failed.\n");
+ an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
+
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
fec_req = "RS-FEC";
@@ -738,8 +737,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
kfree(caps);
done:
- netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
- speed, fec_req, fec, an, fc);
+ netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
+ speed, fec_req, fec, an_advertised, an, fc);
ice_print_topo_conflict(vsi);
}
@@ -771,6 +770,100 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
}
/**
+ * ice_set_dflt_mib - send a default config MIB to the FW
+ * @pf: private PF struct
+ *
+ * This function sends a default configuration MIB to the FW.
+ *
+ * If this function errors out at any point, the driver is still able to
+ * function. The main impact is that LFC may not operate as expected.
+ * Therefore an error state in this function should be treated with a DBG
+ * message and continue on with driver rebuild/reenable.
+ */
+static void ice_set_dflt_mib(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ u8 mib_type, *buf, *lldpmib = NULL;
+ u16 len, typelen, offset = 0;
+ struct ice_lldp_org_tlv *tlv;
+ struct ice_hw *hw;
+ u32 ouisubtype;
+
+ if (!pf) {
+ dev_dbg(dev, "%s NULL pf pointer\n", __func__);
+ return;
+ }
+
+ hw = &pf->hw;
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib) {
+ dev_dbg(dev, "%s Failed to allocate MIB memory\n",
+ __func__);
+ return;
+ }
+
+ /* Add ETS CFG TLV */
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf = tlv->tlvinfo;
+ buf[0] = 0;
+
+ /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
+ * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
+ * Octets 13 - 20 are TSA values - leave as zeros
+ */
+ buf[5] = 0x64;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add ETS REC TLV */
+ buf = tlv->tlvinfo;
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet of buf is reserved
+ * Octets 1 - 4 map UP to TC - all UPs map to zero
+ * Octets 5 - 12 are BW values - set TC 0 to 100%.
+ * Octets 13 - 20 are TSA value - leave as zeros
+ */
+ buf[5] = 0x64;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add PFC CFG TLV */
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+	/* Octet 0 low nibble sets the PFC capability to 8 TCs; octet 1 is
+	 * left as all zeros - PFC disabled
+	 */
+ buf[0] = 0x08;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+
+ if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
+ dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
+
+ kfree(lldpmib);
+}
+
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -804,9 +897,11 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
- /* if the old link up/down and speed is the same as the new */
- if (link_up == old_link && link_speed == old_link_speed)
- return result;
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
+ link_up = true;
vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->port_info)
@@ -825,7 +920,17 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
}
}
- ice_dcb_rebuild(pf);
+ /* if the old link up/down and speed is the same as the new */
+ if (link_up == old_link && link_speed == old_link_speed)
+ return result;
+
+ if (ice_is_dcb_active(pf)) {
+ if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ ice_dcb_rebuild(pf);
+ } else {
+ if (link_up)
+ ice_set_dflt_mib(pf);
+ }
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -918,6 +1023,151 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
return status;
}
+enum ice_aq_task_state {
+ ICE_AQ_TASK_WAITING = 0,
+ ICE_AQ_TASK_COMPLETE,
+ ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+ struct hlist_node entry;
+
+ u16 opcode;
+ struct ice_rq_event_info *event;
+ enum ice_aq_task_state state;
+};
+
+/**
+ * ice_wait_for_aq_event - Wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode to wait for
+ * @timeout: how long to wait, in jiffies
+ * @event: storage for the event info
+ *
+ * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
+ * current thread will be put to sleep until the specified event occurs or
+ * until the given timeout is reached.
+ *
+ * To obtain only the descriptor contents, pass an event without an allocated
+ * msg_buf. If the complete data buffer is desired, allocate the
+ * event->msg_buf with enough space ahead of time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ long ret;
+ int err;
+
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ INIT_HLIST_NODE(&task->entry);
+ task->opcode = opcode;
+ task->event = event;
+ task->state = ICE_AQ_TASK_WAITING;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_add_head(&task->entry, &pf->aq_wait_list);
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+ timeout);
+ switch (task->state) {
+ case ICE_AQ_TASK_WAITING:
+ err = ret < 0 ? ret : -ETIMEDOUT;
+ break;
+ case ICE_AQ_TASK_CANCELED:
+ err = ret < 0 ? ret : -ECANCELED;
+ break;
+ case ICE_AQ_TASK_COMPLETE:
+ err = ret < 0 ? ret : 0;
+ break;
+ default:
+ WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+ err = -EINVAL;
+ break;
+ }
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_del(&task->entry);
+ spin_unlock_bh(&pf->aq_wait_lock);
+ kfree(task);
+
+ return err;
+}
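+
+/* Usage sketch (illustrative assumption, not part of this patch): a caller
+ * that has just issued an AdminQ command and needs the matching completion
+ * event might wait on it like this; the opcode and timeout are examples
+ * only. Passing an event with no msg_buf returns just the descriptor:
+ *
+ *	struct ice_rq_event_info event = {};
+ *	int err;
+ *
+ *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate,
+ *				    HZ, &event);
+ *	if (err == -ETIMEDOUT)
+ *		dev_warn(ice_pf_to_dev(pf), "Timed out waiting on AQ event\n");
+ */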
+
+/**
+ * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode of the event
+ * @event: the event to check
+ *
+ * Loops over the current list of pending threads waiting for an AdminQ event.
+ * For each matching task, copy the contents of the event into the task
+ * structure and wake up the thread.
+ *
+ * If multiple threads wait for the same opcode, they will all be woken up.
+ *
+ * Note that event->msg_buf will only be duplicated if the event has a buffer
+ * with enough space already allocated. Otherwise, only the descriptor and
+ * message length will be copied.
+ */
+static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ bool found = false;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
+ if (task->state || task->opcode != opcode)
+ continue;
+
+ memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
+ task->event->msg_len = event->msg_len;
+
+ /* Only copy the data buffer if a destination was set */
+ if (task->event->msg_buf &&
+ task->event->buf_len > event->buf_len) {
+ memcpy(task->event->msg_buf, event->msg_buf,
+ event->buf_len);
+ task->event->buf_len = event->buf_len;
+ }
+
+ task->state = ICE_AQ_TASK_COMPLETE;
+ found = true;
+ }
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ if (found)
+ wake_up(&pf->aq_wait_queue);
+}
+
+/**
+ * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
+ * @pf: the PF private structure
+ *
+ * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
+ * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
+ */
+static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
+{
+ struct ice_aq_task *task;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry)
+ task->state = ICE_AQ_TASK_CANCELED;
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ wake_up(&pf->aq_wait_queue);
+}
+
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
@@ -1014,6 +1264,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
+ /* Notify any thread that might be waiting for this event */
+ ice_aq_check_events(pf, opcode, &event);
+
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf, &event))
@@ -1137,10 +1390,15 @@ static void ice_service_task_complete(struct ice_pf *pf)
/**
* ice_service_task_stop - stop service task and cancel works
* @pf: board private structure
+ *
+ * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
+ * 1 otherwise.
*/
-static void ice_service_task_stop(struct ice_pf *pf)
+static int ice_service_task_stop(struct ice_pf *pf)
{
- set_bit(__ICE_SERVICE_DIS, pf->state);
+ int ret;
+
+ ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
del_timer_sync(&pf->serv_tmr);
@@ -1148,6 +1406,7 @@ static void ice_service_task_stop(struct ice_pf *pf)
cancel_work_sync(&pf->serv_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
+ return ret;
}
/**
@@ -1382,25 +1641,23 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
goto out;
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ /* Use the current user PHY configuration. The current user PHY
+ * configuration is initialized during probe from PHY capabilities
+ * software mode, and updated on set PHY configuration.
+ */
+ cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
retcode = -ENOMEM;
goto out;
}
- cfg->phy_type_low = pcaps->phy_type_low;
- cfg->phy_type_high = pcaps->phy_type_high;
- cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- cfg->low_power_ctrl = pcaps->low_power_ctrl;
- cfg->eee_cap = pcaps->eee_cap;
- cfg->eeer_value = pcaps->eeer_value;
- cfg->link_fec_opt = pcaps->link_fec_options;
+ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
if (link_up)
cfg->caps |= ICE_AQ_PHY_ENA_LINK;
else
cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
- retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
if (retcode) {
dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
vsi->vsi_num, retcode);
@@ -1414,8 +1671,312 @@ out:
}
/**
- * ice_check_media_subtask - Check for media; bring link up if detected.
+ * ice_init_nvm_phy_type - Initialize the NVM PHY type
+ * @pi: port info structure
+ *
+ * Initialize nvm_phy_type_[low|high] for link lenient mode support
+ */
+static int ice_init_nvm_phy_type(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_pf *pf = pi->hw->back;
+ enum ice_status status;
+ int err = 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
+ NULL);
+
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+ err = -EIO;
+ goto out;
+ }
+
+ pf->nvm_phy_type_hi = pcaps->phy_type_high;
+ pf->nvm_phy_type_lo = pcaps->phy_type_low;
+
+out:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_init_link_dflt_override - Initialize link default override
+ * @pi: port info structure
+ *
+ * Initialize link default override and PHY total port shutdown during probe
+ */
+static void ice_init_link_dflt_override(struct ice_port_info *pi)
+{
+ struct ice_link_default_override_tlv *ldo;
+ struct ice_pf *pf = pi->hw->back;
+
+ ldo = &pf->link_dflt_override;
+ if (ice_get_link_default_override(ldo, pi))
+ return;
+
+ if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
+ return;
+
+ /* Enable Total Port Shutdown (override/replace link-down-on-close
+ * ethtool private flag) for ports with Port Disable bit set.
+ */
+ set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
+ set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
+}
+
+/**
+ * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
+ * @pi: port info structure
+ *
+ * If default override is enabled, initialize the user PHY cfg speed and FEC
+ * settings using the default override mask from the NVM.
+ *
+ * The PHY should only be configured with the default override settings the
+ * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
+ * is used to indicate that the user PHY cfg default override is initialized
+ * and the PHY has not been configured with the default override settings. The
+ * state is set here, and cleared in ice_configure_phy the first time the PHY is
+ * configured.
+ */
+static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
+{
+ struct ice_link_default_override_tlv *ldo;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_phy_info *phy = &pi->phy;
+ struct ice_pf *pf = pi->hw->back;
+
+ ldo = &pf->link_dflt_override;
+
+	/* If link default override is enabled, use it to mask the NVM PHY
+	 * capabilities for the speed and FEC default configuration.
+ */
+ cfg = &phy->curr_user_phy_cfg;
+
+ if (ldo->phy_type_low || ldo->phy_type_high) {
+ cfg->phy_type_low = pf->nvm_phy_type_lo &
+ cpu_to_le64(ldo->phy_type_low);
+ cfg->phy_type_high = pf->nvm_phy_type_hi &
+ cpu_to_le64(ldo->phy_type_high);
+ }
+ cfg->link_fec_opt = ldo->fec_options;
+ phy->curr_user_fec_req = ICE_FEC_AUTO;
+
+ set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
+}
+
+/**
+ * ice_init_phy_user_cfg - Initialize the PHY user configuration
+ * @pi: port info structure
+ *
+ * Initialize the current user PHY configuration, and the requested speed,
+ * FEC, and FC modes to their defaults. The PHY defaults come from the get
+ * PHY capabilities topology with media, so only call this once media is
+ * first available; an error is returned otherwise. The PHY initialization
+ * completed state is set here.
+ *
+ * These configurations are used later when setting the PHY configuration.
+ * The user PHY configuration is updated on each set PHY configuration.
+ *
+ * Returns: 0 on success, negative on failure.
+ */
+static int ice_init_phy_user_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_phy_info *phy = &pi->phy;
+ struct ice_pf *pf = pi->hw->back;
+ enum ice_status status;
+ struct ice_vsi *vsi;
+ int err = 0;
+
+ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+ return -EIO;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EINVAL;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
+
+ /* check if lenient mode is supported and enabled */
+ if (ice_fw_supports_link_override(&vsi->back->hw) &&
+ !(pcaps->module_compliance_enforcement &
+ ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
+ set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
+
+ /* if link default override is enabled, initialize user PHY
+ * configuration with link default override values
+ */
+ if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
+ ice_init_phy_cfg_dflt_override(pi);
+ goto out;
+ }
+ }
+
+ /* if link default override is not enabled, initialize PHY using
+ * topology with media
+ */
+ phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
+ pcaps->link_fec_options);
+ phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
+
+out:
+ phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
+ set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
+err_out:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_configure_phy - configure PHY
+ * @vsi: VSI of PHY
+ *
+ * Set the PHY configuration. If the current PHY configuration is the same as
+ * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
+ * configure the PHY based on the get PHY capabilities for topology with media.
+ */
+static int ice_configure_phy(struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ int err = 0;
+
+ pi = vsi->port_info;
+ if (!pi)
+ return -EINVAL;
+
+ /* Ensure we have media as we cannot configure a medialess port */
+ if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+ return -EPERM;
+
+ ice_print_topo_conflict(vsi);
+
+ if (vsi->port_info->phy.link_info.topo_media_conflict ==
+ ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ return -EPERM;
+
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
+ return ice_force_phys_link_state(vsi, true);
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ /* Get current PHY config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ goto done;
+ }
+
+ /* If PHY enable link is configured and configuration has not changed,
+ * there's nothing to do
+ */
+ if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
+ ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
+ goto done;
+
+ /* Use PHY topology as baseline for configuration */
+ memset(pcaps, 0, sizeof(*pcaps));
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status) {
+ dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ goto done;
+ }
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
+
+ /* Speed - If default override pending, use curr_user_phy_cfg set in
+	 * ice_init_phy_cfg_dflt_override.
+ */
+ if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+ vsi->back->state)) {
+ cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
+ cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
+ } else {
+ u64 phy_low = 0, phy_high = 0;
+
+ ice_update_phy_type(&phy_low, &phy_high,
+ pi->phy.curr_user_speed_req);
+ cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
+ cfg->phy_type_high = pcaps->phy_type_high &
+ cpu_to_le64(phy_high);
+ }
+
+ /* Can't provide what was requested; use PHY capabilities */
+ if (!cfg->phy_type_low && !cfg->phy_type_high) {
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ }
+
+ /* FEC */
+ ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
+
+ /* Can't provide what was requested; use PHY capabilities */
+ if (cfg->link_fec_opt !=
+ (cfg->link_fec_opt & pcaps->link_fec_options)) {
+ cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ }
+
+ /* Flow Control - always supported; no need to check against
+ * capabilities
+ */
+ ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
+
+ /* Enable link and link update */
+ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
+
+ status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
+ if (status) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ }
+
+ kfree(cfg);
+done:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_check_media_subtask - Check for media
* @pf: pointer to PF struct
+ *
+ * If media is available, then initialize the PHY user configuration if it
+ * has not been done yet, and configure the PHY if the interface is up.
*/
static void ice_check_media_subtask(struct ice_pf *pf)
{
@@ -1423,15 +1984,12 @@ static void ice_check_media_subtask(struct ice_pf *pf)
struct ice_vsi *vsi;
int err;
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
+ /* No need to check for media if it's already present */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
return;
- /* No need to check for media if it's already present or the interface
- * is down
- */
- if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
- test_bit(__ICE_DOWN, vsi->state))
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
return;
/* Refresh link info and check if media is present */
@@ -1441,10 +1999,19 @@ static void ice_check_media_subtask(struct ice_pf *pf)
return;
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- err = ice_force_phys_link_state(vsi, true);
- if (err)
+ if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
+ ice_init_phy_user_cfg(pi);
+
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (test_bit(__ICE_DOWN, vsi->state) &&
+ test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
return;
- clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ err = ice_configure_phy(vsi);
+ if (!err)
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
/* A Link Status Event will be generated; the event handler
* will complete bringing the interface up
@@ -1982,9 +2549,6 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_QUERY_PROG:
- xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
- return 0;
case XDP_SETUP_XSK_UMEM:
return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
xdp->xsk.queue_id);
@@ -2779,6 +3343,10 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
@@ -2949,6 +3517,27 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_is_wol_supported - get NVM state of WoL
+ * @pf: board private structure
+ *
+ * Check if WoL is supported based on the HW configuration.
+ * Returns true if NVM supports and enables WoL for this port, false otherwise
+ */
+bool ice_is_wol_supported(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u16 wol_ctrl;
+
+ /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
+ * word) indicates WoL is not supported on the corresponding PF ID.
+ */
+ if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
+ return false;
+
+ return !(BIT(hw->pf_id) & wol_ctrl);
+}
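+
+/* Usage sketch (illustrative, not part of this patch): an ethtool get_wol
+ * handler could report Wake-on-LAN support with this helper, where wol is
+ * the handler's struct ethtool_wolinfo parameter:
+ *
+ *	wol->supported = ice_is_wol_supported(pf) ? WAKE_MAGIC : 0;
+ *	wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
+ */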
+
+/**
* ice_vsi_recfg_qs - Change the number of queues on a VSI
* @vsi: VSI being changed
* @new_rx: new number of Rx queues
@@ -2995,6 +3584,60 @@ done:
}
/**
+ * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
+ * @pf: PF to configure
+ *
+ * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
+ * VSI can still Tx/Rx VLAN tagged packets.
+ */
+static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_ctx *ctxt;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!vsi)
+ return;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return;
+
+ hw = &pf->hw;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ /* disable VLAN anti-spoof */
+ ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ /* disable VLAN pruning and keep all other settings */
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ /* allow all VLANs on Tx and don't strip on Rx */
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
+ ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ }
+
+ kfree(ctxt);
+}
+
+/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
* @status: status of package load
@@ -3168,11 +3811,11 @@ static enum ice_status ice_send_version(struct ice_pf *pf)
{
struct ice_driver_ver dv;
- dv.major_ver = DRV_VERSION_MAJOR;
- dv.minor_ver = DRV_VERSION_MINOR;
- dv.build_ver = DRV_VERSION_BUILD;
+ dv.major_ver = 0xff;
+ dv.minor_ver = 0xff;
+ dv.build_ver = 0xff;
dv.subbuild_ver = 0;
- strscpy((char *)dv.driver_string, DRV_VERSION,
+ strscpy((char *)dv.driver_string, UTS_RELEASE,
sizeof(dv.driver_string));
return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}
@@ -3296,6 +3939,33 @@ dflt_pkg_load:
}
/**
+ * ice_print_wake_reason - show the wake up cause in the log
+ * @pf: pointer to the PF struct
+ */
+static void ice_print_wake_reason(struct ice_pf *pf)
+{
+ u32 wus = pf->wakeup_reason;
+ const char *wake_str;
+
+ /* if no wake event, nothing to print */
+ if (!wus)
+ return;
+
+ if (wus & PFPM_WUS_LNKC_M)
+ wake_str = "Link\n";
+ else if (wus & PFPM_WUS_MAG_M)
+ wake_str = "Magic Packet\n";
+ else if (wus & PFPM_WUS_MNG_M)
+ wake_str = "Management\n";
+ else if (wus & PFPM_WUS_FW_RST_WK_M)
+ wake_str = "Firmware Reset\n";
+ else
+ wake_str = "Unknown\n";
+
+ dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -3463,8 +4133,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_send_version(pf);
if (err) {
dev_err(dev, "probe failed sending driver version %s. error: %d\n",
- ice_drv_ver, err);
- goto err_alloc_sw_unroll;
+ UTS_RELEASE, err);
+ goto err_send_version_unroll;
}
/* since everything is good, start the service timer */
@@ -3473,14 +4143,60 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
+ }
+
+ err = ice_init_nvm_phy_type(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ err = ice_update_link_info(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_update_link_info failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ ice_init_link_dflt_override(pf->hw.port_info);
+
+ /* if media available, initialize PHY settings */
+ if (pf->hw.port_info->phy.link_info.link_info &
+ ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_init_phy_user_cfg(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (vsi)
+ ice_configure_phy(vsi);
+ }
+ } else {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
}
ice_verify_cacheline_size(pf);
- /* If no DDP driven features have to be setup, we are done with probe */
- if (ice_is_safe_mode(pf))
+ /* Save wakeup reason register for later use */
+ pf->wakeup_reason = rd32(hw, PFPM_WUS);
+
+ /* check for a power management event */
+ ice_print_wake_reason(pf);
+
+ /* clear wake status, all bits */
+ wr32(hw, PFPM_WUS, U32_MAX);
+
+ /* Disable WoL at init, wait for user to enable */
+ device_set_wakeup_enable(dev, false);
+
+ if (ice_is_safe_mode(pf)) {
+ ice_set_safe_mode_vlan_cfg(pf);
goto probe_done;
+ }
/* initialize DDP driven features */
@@ -3504,6 +4220,8 @@ probe_done:
clear_bit(__ICE_DOWN, pf->state);
return 0;
+err_send_version_unroll:
+ ice_vsi_release_all(pf);
err_alloc_sw_unroll:
ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
@@ -3522,10 +4240,73 @@ err_init_pf_unroll:
err_exit_unroll:
ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
return err;
}
/**
+ * ice_set_wake - enable or disable Wake on LAN
+ * @pf: pointer to the PF struct
+ *
+ * Simple helper for WoL control
+ */
+static void ice_set_wake(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool wol = pf->wol_ena;
+
+ /* clear wake state, otherwise new wake events won't fire */
+ wr32(hw, PFPM_WUS, U32_MAX);
+
+ /* enable / disable APM wake up, no RMW needed */
+ wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
+
+ /* set magic packet filter enabled */
+ wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
+}
+
+/**
+ * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
+ * @pf: pointer to the PF struct
+ *
+ * Issue firmware command to enable multicast magic wake, making
+ * sure that any locally administered address (LAA) is used for
+ * wake, and that PF reset doesn't undo the LAA.
+ */
+static void ice_setup_mc_magic_wake(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 mac_addr[ETH_ALEN];
+ struct ice_vsi *vsi;
+ u8 flags;
+
+ if (!pf->wol_ena)
+ return;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return;
+
+ /* Get current MAC address in case it's an LAA */
+ if (vsi->netdev)
+ ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
+ else
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+
+ flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
+ ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
+ ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
+
+ status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
+ if (status)
+ dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+}
+
+/**
* ice_remove - Device removal routine
* @pdev: PCI device information struct
*/
@@ -3551,11 +4332,15 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ ice_aq_cancel_waiting_tasks(pf);
+
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
+ ice_setup_mc_magic_wake(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
+ ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
@@ -3575,9 +4360,231 @@ static void ice_remove(struct pci_dev *pdev)
pci_wait_for_pending_transaction(pdev);
ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
}
/**
+ * ice_shutdown - PCI callback for shutting down device
+ * @pdev: PCI device information struct
+ */
+static void ice_shutdown(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ ice_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_ena);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * ice_prepare_for_shutdown - prep for PCI shutdown
+ * @pf: board private structure
+ *
+ * Inform or close all dependent features in prep for PCI device shutdown
+ */
+static void ice_prepare_for_shutdown(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 v;
+
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
+
+ dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
+
+ /* disable the VSIs and their queues that are not already DOWN */
+ ice_pf_dis_all_vsi(pf, false);
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ pf->vsi[v]->vsi_num = 0;
+
+ ice_shutdown_all_ctrlq(hw);
+}
+
+/**
+ * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
+ * @pf: board private structure to reinitialize
+ *
+ * This routine reinitializes the interrupt scheme that was cleared during
+ * the power management suspend callback.
+ *
+ * This should be called during resume routine to re-allocate the q_vectors
+ * and reacquire interrupts.
+ */
+static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret, v;
+
+ /* Since we clear MSIX flag during suspend, we need to
+ * set it back during resume...
+ */
+
+ ret = ice_init_interrupt_scheme(pf);
+ if (ret) {
+ dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
+ return ret;
+ }
+
+ /* Remap vectors and rings, after successful re-init interrupts */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+
+ ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
+ if (ret)
+ goto err_reinit;
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ }
+
+ ret = ice_req_irq_msix_misc(pf);
+ if (ret) {
+ dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
+ ret);
+ goto err_reinit;
+ }
+
+ return 0;
+
+err_reinit:
+ while (v--)
+ if (pf->vsi[v])
+ ice_vsi_free_q_vectors(pf->vsi[v]);
+
+ return ret;
+}
+
+/**
+ * ice_suspend - Power Management callback to quiesce the device
+ * @dev: generic device information structure
+ *
+ * Quiesce the device and prepare it for a D3 transition.
+ */
+static int __maybe_unused ice_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ice_pf *pf;
+ int disabled, v;
+
+ pf = pci_get_drvdata(pdev);
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Device is not ready, no need to suspend it\n");
+ return -EBUSY;
+ }
+
+ /* Stop watchdog tasks until resume completion.
+ * Even though it is most likely that the service task is
+ * disabled if the device is suspended or down, the service task's
+ * state is controlled by a different state bit, and we should
+ * store and honor whatever state that bit is in at this point.
+ */
+ disabled = ice_service_task_stop(pf);
+
+ /* Already suspended? Then there is nothing to do */
+ if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
+ if (!disabled)
+ ice_service_task_restart(pf);
+ return 0;
+ }
+
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ ice_is_reset_in_progress(pf->state)) {
+ dev_err(dev, "can't suspend device in reset or already down\n");
+ if (!disabled)
+ ice_service_task_restart(pf);
+ return 0;
+ }
+
+ ice_setup_mc_magic_wake(pf);
+
+ ice_prepare_for_shutdown(pf);
+
+ ice_set_wake(pf);
+
+ /* Free vectors, clear the interrupt scheme and release IRQs
+ * for proper hibernation, especially with large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ ice_free_irq_msix_misc(pf);
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+ ice_vsi_free_q_vectors(pf->vsi[v]);
+ }
+ ice_clear_interrupt_scheme(pf);
+
+ pci_wake_from_d3(pdev, pf->wol_ena);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+/**
+ * ice_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
+ */
+static int __maybe_unused ice_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ enum ice_reset_req reset_type;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!pci_device_is_present(pdev))
+ return -ENODEV;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot enable device after suspend\n");
+ return ret;
+ }
+
+ pf = pci_get_drvdata(pdev);
+ hw = &pf->hw;
+
+ pf->wakeup_reason = rd32(hw, PFPM_WUS);
+ ice_print_wake_reason(pf);
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ ret = ice_reinit_interrupt_scheme(pf);
+ if (ret)
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+
+ clear_bit(__ICE_DOWN, pf->state);
+ /* Now perform PF reset and rebuild */
+ reset_type = ICE_RESET_PFR;
+ /* re-enable service task for reset, but allow reset to schedule it */
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
+
+ if (ice_schedule_reset(pf, reset_type))
+ dev_err(dev, "Reset during resume failed.\n");
+
+ clear_bit(__ICE_SUSPENDED, pf->state);
+ ice_service_task_restart(pf);
+
+ /* Restart the service task */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
* ice_pci_err_detected - warning that PCI error has been detected
* @pdev: PCI device information struct
* @err: the type of PCI error
@@ -3673,6 +4680,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
+ ice_restore_all_vfs_msi_state(pdev);
+
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -3742,6 +4751,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+
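For readers unfamiliar with the macro, SIMPLE_DEV_PM_OPS() above ties the suspend/resume callbacks into a struct dev_pm_ops. A rough sketch of what it expands to when CONFIG_PM_SLEEP is enabled (runtime-PM entries stay NULL):

static __maybe_unused const struct dev_pm_ops ice_pm_ops = {
	/* system-sleep entries all reuse the two callbacks */
	.suspend  = ice_suspend,
	.resume   = ice_resume,
	.freeze   = ice_suspend,
	.thaw     = ice_resume,
	.poweroff = ice_suspend,
	.restore  = ice_resume,
};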
static const struct pci_error_handlers ice_pci_err_handler = {
.error_detected = ice_pci_err_detected,
.slot_reset = ice_pci_err_slot_reset,
@@ -3755,6 +4766,10 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &ice_pm_ops,
+#endif /* CONFIG_PM */
+ .shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
.err_handler = &ice_pci_err_handler
};
@@ -3769,7 +4784,7 @@ static int __init ice_module_init(void)
{
int status;
- pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
+ pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
@@ -4275,6 +5290,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
+ vsi->rx_gro_dropped = 0;
rcu_read_lock();
@@ -4289,6 +5305,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
}
/* update XDP Tx rings counters */
@@ -4320,7 +5337,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
- cur_ns->rx_dropped = cur_es->rx_discards;
+ cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
@@ -4963,10 +5980,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (err)
goto err_sched_init_port;
- err = ice_update_link_info(hw->port_info);
- if (err)
- dev_err(dev, "Get link status error %d\n", err);
-
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
@@ -5667,20 +6680,30 @@ int ice_open(struct net_device *netdev)
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- err = ice_force_phys_link_state(vsi, true);
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+ if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
+ err = ice_init_phy_user_cfg(pi);
+ if (err) {
+ netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = ice_configure_phy(vsi);
if (err) {
netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
return err;
}
} else {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
err = ice_aq_set_link_restart_an(pi, false, NULL);
if (err) {
netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
vsi->vsi_num, err);
return err;
}
- set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
}
err = ice_vsi_open(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index b049c1c30c88..5903a36763de 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -108,6 +108,76 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
}
/**
+ * ice_aq_update_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands (0x0703)
+ */
+enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
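The flash offset travels in only 24 bits of the descriptor, which is why the high byte is rejected above. With an illustrative value:

/* e.g. offset = 0x012345 splits as:
 *	offset_low  = cpu_to_le16(0x2345);	(offset & 0xFFFF)
 *	offset_high = 0x01;			((offset >> 16) & 0xFF)
 * while any offset >= 0x1000000 fails the 0xFF000000 check.
 */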
+/**
+ * ice_aq_erase_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @cd: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands (0x0702)
+ */
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN);
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_read_sr_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -172,8 +242,7 @@ void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-static enum ice_status
-ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -197,7 +266,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-static enum ice_status
+enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
@@ -635,3 +704,119 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
return status;
}
+
+/**
+ * ice_nvm_write_activate
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM activate admin command bits (banks to be validated)
+ *
+ * Update the control word with the required banks' validity bits
+ * and dump the Shadow RAM to flash (0x0707)
+ */
+enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+{
+ struct ice_aqc_nvm *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.nvm;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
+
+ cmd->cmd_flags = cmd_flags;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_nvm_update_empr
+ * @hw: pointer to the HW struct
+ *
+ * Update EMPR (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ */
+enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_nvm_set_pkg_data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the package data stored by FW is deleted;
+ * the buffer must then have size 0
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set package data (0x070A). This command is equivalent to the reception
+ * of a PLDM FW Update GetPackageData cmd. This command should be sent
+ * as part of the NVM update as the first cmd in the flow.
+ */
+enum ice_status
+ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
+ u16 length, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_pkg_data *cmd;
+ struct ice_aq_desc desc;
+
+ if (length != 0 && !data)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.pkg_data;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
+/**
+ * ice_nvm_pass_component_tbl
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter for determining stage of the update
+ * @comp_response: a pointer to the response from the 0x070B AQC.
+ * @comp_response_code: a pointer to the response code from the 0x070B AQC.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Pass component table (0x070B). This command is equivalent to the reception
+ * of a PLDM FW Update PassComponentTable cmd. This command should be sent once
+ * per component. It can only be sent after the Set Package Data cmd and
+ * before the actual update. FW will assume these commands keep coming until
+ * the TransferFlag is set to End or StartAndEnd.
+ */
+enum ice_status
+ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_pass_comp_tbl *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || !comp_response || !comp_response_code)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.pass_comp_tbl;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+
+ if (!status) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+ return status;
+}
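Taken together, the two commands above bracket the component-transfer phase of an update: package data first, then one pass-component-table call per component. A minimal sketch of that ordering, where pkg_data/pkg_len and comp_tbl/comp_tbl_len are illustrative locals and 0x05 is the PLDM StartAndEnd transfer flag for a table sent in a single chunk:

	u8 comp_response, comp_response_code;
	enum ice_status status;

	/* 1) replace any package data currently cached by firmware */
	status = ice_nvm_set_pkg_data(hw, false, pkg_data, pkg_len, NULL);
	if (status)
		return status;

	/* 2) announce each component before transferring its image */
	status = ice_nvm_pass_component_tbl(hw, comp_tbl, comp_tbl_len,
					    0x05 /* StartAndEnd */,
					    &comp_response,
					    &comp_response_code, NULL);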
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 165eda07b93d..8d430909f846 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -11,6 +11,26 @@ enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
+enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
+enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
+enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw);
+enum ice_status
+ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
+ u16 length, struct ice_sq_cd *cd);
+enum ice_status
+ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code, struct ice_sq_cd *cd);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 0475134295e4..44a228530253 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -129,7 +129,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*/
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
@@ -149,8 +149,8 @@ enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
+ struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
- struct ice_aqc_get_elem elem;
struct ice_sched_node *node;
enum ice_status status;
struct ice_hw *hw;
@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
return ICE_ERR_PARAM;
}
- /* query the current node information from FW before additing it
+ /* query the current node information from FW before adding it
* to the SW DB
*/
status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
@@ -195,7 +195,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node->parent = parent;
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
- memcpy(&node->info, &elem.generic[0], sizeof(node->info));
+ node->info = elem;
return 0;
}
@@ -238,7 +238,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
enum ice_status status;
u16 buf_size;
- buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+ buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
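struct_size() sizes a header plus its trailing flexible array in one overflow-checked expression, replacing the old `sizeof(*buf) + sizeof(u32) * (num_nodes - 1)` arithmetic that assumed a one-element placeholder array. A minimal sketch of the semantics, using an illustrative struct:

struct demo_elem {
	__le32 hdr;
	__le32 teid[];		/* flexible array member */
};

/* struct_size(buf, teid, n) evaluates to
 *	sizeof(struct demo_elem) + n * sizeof(__le32)
 * and saturates to SIZE_MAX if the multiply or add would overflow,
 * so the allocation fails cleanly instead of being undersized.
 */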
@@ -423,7 +423,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*/
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_conf_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
@@ -443,8 +443,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
* Suspend scheduling elements (0x0409)
*/
static enum ice_status
-ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
@@ -464,8 +463,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
* resume scheduling elements (0x040A)
*/
static enum ice_status
-ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
@@ -506,9 +504,9 @@ static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
- struct ice_aqc_suspend_resume_elem *buf;
u16 i, buf_size, num_elem_ret = 0;
enum ice_status status;
+ __le32 *buf;
buf_size = sizeof(*buf) * num_nodes;
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
@@ -516,7 +514,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
return ICE_ERR_NO_MEMORY;
for (i = 0; i < num_nodes; i++)
- buf->teid[i] = cpu_to_le32(node_teids[i]);
+ buf[i] = cpu_to_le32(node_teids[i]);
if (suspend)
status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
@@ -580,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
- * @opcode:opcode for add, query, or remove profile(s)
+ * @opcode: opcode for add, query, or remove profile(s)
* @num_profiles: the number of profiles
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -591,7 +589,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*/
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
- u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
@@ -622,13 +620,11 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*/
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, u16 *num_profiles_added,
- struct ice_sq_cd *cd)
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ u16 *num_profiles_added, struct ice_sq_cd *cd)
{
- return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
- num_profiles, buf,
- buf_size, num_profiles_added, cd);
+ return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
+ buf, buf_size, num_profiles_added, cd);
}
/**
@@ -644,13 +640,12 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*/
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, u16 *num_profiles_removed,
- struct ice_sq_cd *cd)
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
- num_profiles, buf,
- buf_size, num_profiles_removed, cd);
+ num_profiles, buf, buf_size,
+ num_profiles_removed, cd);
}
/**
@@ -666,7 +661,7 @@ static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
- struct ice_aqc_rl_profile_generic_elem *buf;
+ struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
enum ice_status status;
u16 num_profiles = 1;
@@ -675,8 +670,7 @@ ice_sched_del_rl_profile(struct ice_hw *hw,
return ICE_ERR_IN_USE;
/* Safe to remove profile ID */
- buf = (struct ice_aqc_rl_profile_generic_elem *)
- &rl_info->profile;
+ buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
@@ -831,7 +825,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
size_t buf_size;
u32 teid;
- buf_size = struct_size(buf, generic, num_nodes - 1);
+ buf_size = struct_size(buf, generic, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1282,6 +1276,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
+ * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
+ * @pi: port information structure
+ * @vsi_node: software VSI handle
+ * @qgrp_node: first queue group node identified for scanning
+ * @owner: LAN or RDMA
+ *
+ * This function retrieves a free LAN or RDMA queue group node by scanning
+ * qgrp_node and its siblings for the queue group with the fewest number
+ * of queues currently assigned.
+ */
+static struct ice_sched_node *
+ice_sched_get_free_qgrp(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node,
+ struct ice_sched_node *qgrp_node, u8 owner)
+{
+ struct ice_sched_node *min_qgrp;
+ u8 min_children;
+
+ if (!qgrp_node)
+ return qgrp_node;
+ min_children = qgrp_node->num_children;
+ if (!min_children)
+ return qgrp_node;
+ min_qgrp = qgrp_node;
+ /* scan all queue groups until we find a node with fewer than the
+ * current minimum number of children. This way every active queue
+ * group node carries an equal share, and bandwidth is distributed
+ * evenly across all queues.
+ */
+ while (qgrp_node) {
+ /* make sure the qgroup node is part of the VSI subtree */
+ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ if (qgrp_node->num_children < min_children &&
+ qgrp_node->owner == owner) {
+ /* replace the new min queue group node */
+ min_qgrp = qgrp_node;
+ min_children = min_qgrp->num_children;
+ /* break if it has no children */
+ if (!min_children)
+ break;
+ }
+ qgrp_node = qgrp_node->sibling;
+ }
+ return min_qgrp;
+}
+
+/**
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1294,7 +1335,7 @@ struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
- struct ice_sched_node *vsi_node, *qgrp_node = NULL;
+ struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
@@ -1308,7 +1349,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI ID */
if (!vsi_node)
- goto lan_q_exit;
+ return NULL;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
@@ -1321,8 +1362,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
qgrp_node = qgrp_node->sibling;
}
-lan_q_exit:
- return qgrp_node;
+ /* Select the best queue group */
+ return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
/**
@@ -1867,7 +1908,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
* @node: pointer to node
* @info: node info to update
*
- * It updates the HW DB, and local SW DB of node. It updates the scheduling
+ * Update the HW DB, and local SW DB of node. Update the scheduling
* parameters of node from argument info data buffer (Info->data buf) and
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
@@ -1876,18 +1917,18 @@ static enum ice_status
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
- struct ice_aqc_conf_elem buf;
+ struct ice_aqc_txsched_elem_data buf;
enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
- buf.generic[0] = *info;
+ buf = *info;
/* Parent TEID is reserved field in this aq call */
- buf.generic[0].parent_teid = 0;
+ buf.parent_teid = 0;
/* Element type is reserved field in this aq call */
- buf.generic[0].data.elem_type = 0;
+ buf.data.elem_type = 0;
/* Flags is reserved field in this aq call */
- buf.generic[0].data.flags = 0;
+ buf.data.flags = 0;
/* Update HW DB */
/* Configure element node */
@@ -2131,9 +2172,9 @@ static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
- struct ice_aqc_rl_profile_generic_elem *buf;
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
+ struct ice_aqc_rl_profile_elem *buf;
enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
@@ -2159,8 +2200,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
- rl_prof_elem->bw == bw)
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type && rl_prof_elem->bw == bw)
/* Return existing profile ID info */
return rl_prof_elem;
@@ -2182,8 +2223,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
/* Create new entry in HW DB */
- buf = (struct ice_aqc_rl_profile_generic_elem *)
- &rl_prof_elem->profile;
+ buf = &rl_prof_elem->profile;
status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&profiles_added, NULL);
if (status || profiles_added != num_profiles)
@@ -2391,7 +2431,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type &&
le16_to_cpu(rl_prof_elem->profile.profile_id) ==
profile_id) {
if (rl_prof_elem->prof_id_ref)
@@ -2553,8 +2594,8 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
return 0;
return ice_sched_rm_rl_profile(pi, layer_num,
- rl_prof_info->profile.flags,
- old_id);
+ rl_prof_info->profile.flags &
+ ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
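The masking added in the two hunks above matters because profile.flags packs the profile-type field together with unrelated bits, so a whole-byte compare can miss a matching profile. Schematically:

/* flags layout (schematic): [ other bits | type field ]
 *
 * old: rl_prof_elem->profile.flags == profile_type
 *	- fails whenever any non-type bit happens to be set
 * new: (rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M)
 *	== profile_type
 *	- compares only the type field
 */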
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index f0593cfb6521..0e55ae0d446f 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -56,7 +56,7 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ff7d16ac693e..c3a6c41385ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -29,25 +29,17 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
+ (DUMMY_ETH_HDR_LEN * \
+ sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lg_act) - \
- sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
- ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
+ ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_vsi_list) - \
- sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
- ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
+ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
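Each rewritten macro now measures straight to the start of the variable-length tail and adds n entries, instead of subtracting a one-element placeholder from sizeof. For example, assuming the VSI list tail holds 16-bit entries, ICE_SW_RULE_VSI_LIST_SIZE(n) reduces to:

/* offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi)
 *	-> bytes up to (but not including) the vsi[] array
 * + n * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])
 *	-> n tail entries (__le16 each, under this assumption)
 * No "- 1" correction is needed because nothing is double-counted.
 */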
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
@@ -87,7 +79,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
* @num_elems: pointer to number of elements
* @cd: pointer to command details structure or NULL
*
- * Get switch configuration (0x0200) to be placed in 'buff'.
+ * Get switch configuration (0x0200) to be placed in buf.
* This admin command returns information such as initial VSI/port number
* and switch ID it belongs to.
*
@@ -104,13 +96,13 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
* parsing the response buffer.
*/
static enum ice_status
-ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
+ enum ice_status status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -449,7 +441,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_status status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
+ buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@@ -503,6 +495,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
@@ -514,7 +507,12 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
desc.params.sw_rules.num_rules_fltr_entry_index =
cpu_to_le16(num_rules);
- return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ if (opc != ice_aqc_opc_add_sw_rules &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ status = ICE_ERR_DOES_NOT_EXIST;
+
+ return status;
}
/* ice_init_port_info - Initialize port_info with switch configuration data
@@ -550,7 +548,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
*/
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
- struct ice_aqc_get_sw_cfg_resp *rbuf;
+ struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
enum ice_status status;
u16 req_desc = 0;
u16 num_elems;
@@ -568,19 +566,19 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
* writing a non-zero value in req_desc
*/
do {
+ struct ice_aqc_get_sw_cfg_resp_elem *ele;
+
status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
&req_desc, &num_elems, NULL);
if (status)
break;
- for (i = 0; i < num_elems; i++) {
- struct ice_aqc_get_sw_cfg_resp_elem *ele;
+ for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
u8 res_type;
- ele = rbuf[i].elements;
vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
@@ -856,8 +854,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
- ICE_LG_ACT_VSI_LIST_ID_M;
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
@@ -2037,7 +2034,8 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
@@ -2691,7 +2689,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 buf_len;
/* Allocate resource */
- buf_len = sizeof(*buf);
+ buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -2729,7 +2727,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 buf_len;
/* Free resource */
- buf_len = sizeof(*buf);
+ buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index abdb137c8bb7..9d0d6b0025cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -509,8 +509,8 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
-static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
- unsigned int size)
+static unsigned int
+ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -631,10 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
+ if (likely(page))
return true;
- }
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
@@ -1033,7 +1031,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
@@ -1254,12 +1251,12 @@ construct_skb:
* @itr: ITR value to update
*
* Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * in based on wmem_default, SKB overhead, ethernet overhead, and the current
* link speed.
*
* The following is a calculation derived from:
* wmem_default / (size + overhead) = desired_pkts_per_int
- * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
*
* Assuming wmem_default is 212992 and overhead is 640 bytes per
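Worked through with illustrative numbers (only wmem_default = 212992 and the 640-byte SKB overhead come from the comment itself; the 1514-byte frame, 10 Gbps link, and ~24 bytes of on-wire overhead are assumptions):

/* desired_pkts_per_int = 212992 / (1514 + 640)	~=  98.9
 * pkt_rate = 10e9 / 8 / (1514 + 24)		~=  812,744 pkts/s
 * ITR      = (98.9 / 812744) * 1e6 usecs	~=  122 usecs
 */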
@@ -2294,10 +2291,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > ICE_MAX_DATA_PER_TXD) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (ICE_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > ICE_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2305,7 +2322,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
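The align_pad computation above rounds the fragment's starting offset up to the next read-request boundary before the per-descriptor accounting. With illustrative values, assuming ICE_MAX_READ_REQ_SIZE is 4096:

/* skb_frag_off(stale) = 0x1234 (4660)
 * align_pad = -(0x1234) & (4096 - 1)
 *	     = 0xDCC (3532)	bytes to the next 4 KB boundary
 * so 3532 bytes come off both sum and stale_size before the
 * ICE_MAX_DATA_PER_TXD_ALIGNED chunks are subtracted.
 */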
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index e70c4619edc3..51b4df7a59d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,7 +193,7 @@ struct ice_rxq_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buf_failed;
- u64 page_reuse_count;
+ u64 gro_dropped; /* GRO returned dropped */
};
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 02b12736ea80..bc2f4390b51d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
+ /* this is tracked separately to help us debug stack drops */
+ rx_ring->rx_stats.gro_dropped++;
+ netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
+ rx_ring->q_index);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index c1ad8622e65c..4cdccfadf274 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -87,6 +87,12 @@ enum ice_fc_mode {
ICE_FC_DFLT
};
+enum ice_phy_cache_mode {
+ ICE_FC_MODE = 0,
+ ICE_SPEED_MODE,
+ ICE_FEC_MODE
+};
+
enum ice_fec_mode {
ICE_FEC_NONE = 0,
ICE_FEC_RS,
@@ -94,6 +100,14 @@ enum ice_fec_mode {
ICE_FEC_AUTO
};
+struct ice_phy_cache_mode_data {
+ union {
+ enum ice_fec_mode curr_user_fec_req;
+ enum ice_fc_mode curr_user_fc_req;
+ u16 curr_user_speed_req;
+ } data;
+};
+
enum ice_set_fc_aq_failures {
ICE_SET_FC_AQ_FAIL_NONE = 0,
ICE_SET_FC_AQ_FAIL_GET,
@@ -104,6 +118,7 @@ enum ice_set_fc_aq_failures {
/* Various MAC types */
enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
+ ICE_MAC_E810,
ICE_MAC_GENERIC,
};
@@ -160,6 +175,13 @@ struct ice_phy_info {
u64 phy_type_high;
enum ice_media_type media_type;
u8 get_link_info;
+ /* Refer to struct ice_aqc_get_link_status_data for details on the
+ * speed bits encoded in curr_user_speed_req
+ */
+ u16 curr_user_speed_req;
+ enum ice_fec_mode curr_user_fec_req;
+ enum ice_fc_mode curr_user_fc_req;
+ struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg;
};
/* protocol enumeration for filters */
@@ -222,6 +244,15 @@ struct ice_hw_common_caps {
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
+
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define ICE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define ICE_NVM_PENDING_OROM BIT(1)
+#define ICE_NVM_PENDING_NETLIST BIT(2)
+ bool nvm_unified_update;
+#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
};
/* Function specific capabilities */
@@ -290,7 +321,29 @@ struct ice_nvm_info {
u32 flash_size; /* Size of available flash in bytes */
u8 major_ver; /* major version of NVM package */
u8 minor_ver; /* minor version of dev starter */
- u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+struct ice_link_default_override_tlv {
+ u8 options;
+#define ICE_LINK_OVERRIDE_OPT_M 0x3F
+#define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0)
+#define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1)
+#define ICE_LINK_OVERRIDE_PORT_DIS BIT(2)
+#define ICE_LINK_OVERRIDE_EN BIT(3)
+#define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4)
+#define ICE_LINK_OVERRIDE_EEE_EN BIT(5)
+ u8 phy_config;
+#define ICE_LINK_OVERRIDE_PHY_CFG_S 8
+#define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S)
+#define ICE_LINK_OVERRIDE_PAUSE_M 0x3
+#define ICE_LINK_OVERRIDE_LESM_EN BIT(6)
+#define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7)
+ u8 fec_options;
+#define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF
+ u8 rsvd1;
+ u64 phy_type_low;
+ u64 phy_type_high;
};
#define ICE_NVM_VER_LEN 32
@@ -356,7 +409,7 @@ enum ice_rl_type {
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
-#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
@@ -444,6 +497,7 @@ struct ice_dcb_app_priority_table {
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
#define ICE_CEE_APP_SEL_ETHTYPE 0x0
+#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134
#define ICE_CEE_APP_SEL_TCPIP 0x1
struct ice_dcbx_cfg {
@@ -709,6 +763,7 @@ struct ice_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define ICE_SR_BOOT_CFG_PTR 0x132
+#define ICE_SR_NVM_WOL_CFG 0x19
#define ICE_NVM_OROM_VER_OFF 0x02
#define ICE_SR_PBA_BLOCK_PTR 0x16
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
@@ -725,7 +780,21 @@ struct ice_hw_port_stats {
#define ICE_OROM_VER_SHIFT 24
#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
+#define ICE_SR_1ST_NVM_BANK_PTR 0x42
+#define ICE_SR_1ST_OROM_BANK_PTR 0x44
+#define ICE_SR_NETLIST_BANK_PTR 0x46
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
+
+/* Link override related */
+#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4
+#define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2
+#define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2
+#define ICE_FW_API_LINK_OVERRIDE_MAJ 1
+#define ICE_FW_API_LINK_OVERRIDE_MIN 5
+#define ICE_FW_API_LINK_OVERRIDE_PATCH 2
+
#define ICE_SR_WORDS_IN_1KB 512
/* Hash redirection LUT for VSI - maximum array size */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 16a2f2526ccc..71497776ac62 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
num_msix_per_vf = ICE_MIN_INTR_PER_VF;
} else {
@@ -1593,31 +1595,6 @@ err_unroll_intr:
}
/**
- * ice_pf_state_is_nominal - checks the PF for nominal state
- * @pf: pointer to PF to check
- *
- * Check the PF's state for a collection of bits that would indicate
- * the PF is in a state that would inhibit normal operation for
- * driver functionality.
- *
- * Returns true if PF is in a nominal state.
- * Returns false otherwise
- */
-static bool ice_pf_state_is_nominal(struct ice_pf *pf)
-{
- DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
-
- if (!pf)
- return false;
-
- bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
- if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
- return false;
-
- return true;
-}
-
-/**
* ice_pci_sriov_ena - Enable or change number of VFs
* @pf: pointer to the PF structure
* @num_vfs: number of VFs to allocate
@@ -2997,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->max_frame = qpi->rxq.max_pkt_size;
}
- /* VF can request to configure less than allocated queues
- * or default allocated queues. So update the VSI with new number
+ /* VF can request to configure less than allocated queues or default
+ * allocated queues. So update the VSI with new number
*/
vsi->num_txq = num_txq;
vsi->num_rxq = num_rxq;
@@ -4096,3 +4073,33 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
}
}
}
+
+/**
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
+ *
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ struct pci_dev *vfdev;
+ u16 vf_id;
+ int pos;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 67aa9110fdd1..0f519fba3770 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -32,6 +32,7 @@
#define ICE_MAX_RSS_QS_PER_VF 16
#define ICE_NUM_VF_MSIX_MED 17
#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -114,6 +115,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -146,6 +148,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
+#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index b6f928c9e9c9..20ac5fca68c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -206,12 +206,14 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ u16 size;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
return -EINVAL;
- qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ size = struct_size(qg_buf, txqs, 1);
+ qg_buf = kzalloc(size, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
@@ -228,7 +230,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, sizeof(*qg_buf));
+ memset(qg_buf, 0, size);
qg_buf->num_txqs = 1;
err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
if (err)
@@ -296,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
}
}
-
/**
* ice_xsk_umem_disable - disable a UMEM region
* @vsi: Current VSI
@@ -592,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
-
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
@@ -704,8 +704,6 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -781,12 +779,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
- if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
- else
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
- }
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);